      1 // Copyright 2014 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package runtime
      6 
      7 import (
      8 	"runtime/internal/atomic"
      9 	"runtime/internal/sys"
     10 	"unsafe"
     11 )
     12 
     13 var buildVersion = sys.TheVersion
     14 
     15 // Goroutine scheduler
     16 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
     17 //
     18 // The main concepts are:
     19 // G - goroutine.
     20 // M - worker thread, or machine.
     21 // P - processor, a resource that is required to execute Go code.
     22 //     M must have an associated P to execute Go code, however it can be
     23 //     blocked or in a syscall w/o an associated P.
     24 //
     25 // Design doc at https://golang.org/s/go11sched.
     26 
     27 // Worker thread parking/unparking.
     28 // We need to balance between keeping enough running worker threads to utilize
     29 // available hardware parallelism and parking excessive running worker threads
     30 // to conserve CPU resources and power. This is not simple for two reasons:
     31 // (1) scheduler state is intentionally distributed (in particular, per-P work
     32 // queues), so it is not possible to compute global predicates on fast paths;
     33 // (2) for optimal thread management we would need to know the future (don't park
     34 // a worker thread when a new goroutine will be readied in the near future).
     35 //
     36 // Three rejected approaches that would work badly:
     37 // 1. Centralize all scheduler state (would inhibit scalability).
     38 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
     39 //    is a spare P, unpark a thread and hand it the P and the goroutine.
     40 //    This would lead to thread state thrashing, as the thread that readied the
     41 //    goroutine can be out of work the very next moment, and we would then need to park it.
     42 //    Also, it would destroy locality of computation as we want to preserve
     43 //    dependent goroutines on the same thread; and introduce additional latency.
     44 // 3. Unpark an additional thread whenever we ready a goroutine and there is an
     45 //    idle P, but don't do handoff. This would lead to excessive thread parking/
     46 //    unparking as the additional threads will instantly park without discovering
     47 //    any work to do.
     48 //
     49 // The current approach:
     50 // We unpark an additional thread when we ready a goroutine if (1) there is an
     51 // idle P and (2) there are no "spinning" worker threads. A worker thread is considered
     52 // spinning if it is out of local work and did not find work in global run queue/
     53 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
     54 // Threads unparked this way are also considered spinning; we don't do goroutine
     55 // handoff so such threads are out of work initially. Spinning threads do some
     56 // spinning looking for work in per-P run queues before parking. If a spinning
     57 // thread finds work it takes itself out of the spinning state and proceeds to
     58 // execution. If it does not find work it takes itself out of the spinning state
     59 // and then parks.
     60 // If there is at least one spinning thread (sched.nmspinning > 0), we don't unpark
     61 // new threads when readying goroutines. To compensate for that, if the last spinning
     62 // thread finds work and stops spinning, it must unpark a new spinning thread.
     63 // This approach smooths out unjustified spikes of thread unparking,
     64 // but at the same time guarantees eventual maximal CPU parallelism utilization.
     65 //
     66 // The main implementation complication is that we need to be very careful during
     67 // spinning->non-spinning thread transition. This transition can race with submission
     68 // of a new goroutine, and either one part or another needs to unpark another worker
     69 // thread. If they both fail to do that, we can end up with semi-persistent CPU
     70 // underutilization. The general pattern for goroutine readying is: submit a goroutine
     71 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
     72 // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
     73 // #StoreLoad-style memory barrier, check all per-P work queues for new work.
     74 // Note that all this complexity does not apply to the global run queue, as we are not
     75 // sloppy about thread unparking when submitting to the global queue. Also see comments
     76 // for nmspinning manipulation.
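        //
        // An illustrative sketch of the two racing sides described above (not code
        // from this file; checkAllRunqueues is a hypothetical stand-in for the
        // re-check that the scheduler's work-finding loop performs):
        //
        //	// Readying side: publish the goroutine, then (after the
        //	// #StoreLoad-style barrier) check for idle Ps and spinning Ms.
        //	runqput(pp, gp, true)
        //	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
        //		wakep()
        //	}
        //
        //	// Spinning->non-spinning side: retire first, then (after the same
        //	// kind of barrier) re-check all per-P run queues for work.
        //	atomic.Xadd(&sched.nmspinning, -1)
        //	if gp := checkAllRunqueues(); gp != nil { // hypothetical helper
        //		// the last spinner found work: it must unpark a new spinning M
        //	}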
     77 
     78 var (
     79 	m0           m
     80 	g0           g
     81 	raceprocctx0 uintptr
     82 )
     83 
     84 //go:linkname runtime_init runtime.init
     85 func runtime_init()
     86 
     87 //go:linkname main_init main.init
     88 func main_init()
     89 
     90 // main_init_done is a signal used by cgocallbackg that initialization
     91 // has been completed. It is made before _cgo_notify_runtime_init_done,
     92 // so all cgo calls can rely on it existing. When main_init is complete,
     93 // it is closed, meaning cgocallbackg can reliably receive from it.
     94 var main_init_done chan bool
     95 
     96 //go:linkname main_main main.main
     97 func main_main()
     98 
     99 // mainStarted indicates that the main M has started.
    100 var mainStarted bool
    101 
    102 // runtimeInitTime is the nanotime() at which the runtime started.
    103 var runtimeInitTime int64
    104 
    105 // Value to use for signal mask for newly created M's.
    106 var initSigmask sigset
    107 
    108 // The main goroutine.
    109 func main() {
    110 	g := getg()
    111 
    112 	// Racectx of m0->g0 is used only as the parent of the main goroutine.
    113 	// It must not be used for anything else.
    114 	g.m.g0.racectx = 0
    115 
    116 	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
    117 	// Using decimal instead of binary GB and MB because
    118 	// they look nicer in the stack overflow failure message.
    119 	if sys.PtrSize == 8 {
    120 		maxstacksize = 1000000000
    121 	} else {
    122 		maxstacksize = 250000000
    123 	}
    124 
    125 	// Allow newproc to start new Ms.
    126 	mainStarted = true
    127 
    128 	systemstack(func() {
    129 		newm(sysmon, nil)
    130 	})
    131 
    132 	// Lock the main goroutine onto this, the main OS thread,
    133 	// during initialization. Most programs won't care, but a few
    134 	// do require certain calls to be made by the main thread.
    135 	// Those can arrange for main.main to run in the main thread
    136 	// by calling runtime.LockOSThread during initialization
    137 	// to preserve the lock.
    138 	lockOSThread()
    139 
    140 	if g.m != &m0 {
    141 		throw("runtime.main not on m0")
    142 	}
    143 
    144 	runtime_init() // must be before defer
    145 	if nanotime() == 0 {
    146 		throw("nanotime returning zero")
    147 	}
    148 
    149 	// Defer unlock so that runtime.Goexit during init does the unlock too.
    150 	needUnlock := true
    151 	defer func() {
    152 		if needUnlock {
    153 			unlockOSThread()
    154 		}
    155 	}()
    156 
    157 	// Record when the world started. Must be after runtime_init
    158 	// because nanotime on some platforms depends on startNano.
    159 	runtimeInitTime = nanotime()
    160 
    161 	gcenable()
    162 
    163 	main_init_done = make(chan bool)
    164 	if iscgo {
    165 		if _cgo_thread_start == nil {
    166 			throw("_cgo_thread_start missing")
    167 		}
    168 		if GOOS != "windows" {
    169 			if _cgo_setenv == nil {
    170 				throw("_cgo_setenv missing")
    171 			}
    172 			if _cgo_unsetenv == nil {
    173 				throw("_cgo_unsetenv missing")
    174 			}
    175 		}
    176 		if _cgo_notify_runtime_init_done == nil {
    177 			throw("_cgo_notify_runtime_init_done missing")
    178 		}
    179 		// Start the template thread in case we enter Go from
    180 		// a C-created thread and need to create a new thread.
    181 		startTemplateThread()
    182 		cgocall(_cgo_notify_runtime_init_done, nil)
    183 	}
    184 
    185 	fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
    186 	fn()
    187 	close(main_init_done)
    188 
    189 	needUnlock = false
    190 	unlockOSThread()
    191 
    192 	if isarchive || islibrary {
    193 		// A program compiled with -buildmode=c-archive or c-shared
    194 		// has a main, but it is not executed.
    195 		return
    196 	}
    197 	fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
    198 	fn()
    199 	if raceenabled {
    200 		racefini()
    201 	}
    202 
    203 	// Make racy client program work: if panicking on
    204 	// another goroutine at the same time as main returns,
    205 	// let the other goroutine finish printing the panic trace.
    206 	// Once it does, it will exit. See issues 3934 and 20018.
    207 	if atomic.Load(&runningPanicDefers) != 0 {
    208 		// Running deferred functions should not take long.
    209 		for c := 0; c < 1000; c++ {
    210 			if atomic.Load(&runningPanicDefers) == 0 {
    211 				break
    212 			}
    213 			Gosched()
    214 		}
    215 	}
    216 	if atomic.Load(&panicking) != 0 {
    217 		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
    218 	}
    219 
    220 	exit(0)
    221 	for {
    222 		var x *int32
    223 		*x = 0
    224 	}
    225 }
    226 
    227 // os_beforeExit is called from os.Exit(0).
    228 //go:linkname os_beforeExit os.runtime_beforeExit
    229 func os_beforeExit() {
    230 	if raceenabled {
    231 		racefini()
    232 	}
    233 }
    234 
    235 // start forcegc helper goroutine
    236 func init() {
    237 	go forcegchelper()
    238 }
    239 
    240 func forcegchelper() {
    241 	forcegc.g = getg()
    242 	for {
    243 		lock(&forcegc.lock)
    244 		if forcegc.idle != 0 {
    245 			throw("forcegc: phase error")
    246 		}
    247 		atomic.Store(&forcegc.idle, 1)
    248 		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
    249 		// this goroutine is explicitly resumed by sysmon
    250 		if debug.gctrace > 0 {
    251 			println("GC forced")
    252 		}
    253 		// Time-triggered, fully concurrent.
    254 		gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
    255 	}
    256 }
    257 
    258 //go:nosplit
    259 
    260 // Gosched yields the processor, allowing other goroutines to run. It does not
    261 // suspend the current goroutine, so execution resumes automatically.
    262 func Gosched() {
    263 	mcall(gosched_m)
    264 }
    265 
    266 // goschedguarded yields the processor like gosched, but also checks
    267 // for forbidden states and opts out of the yield in those cases.
    268 //go:nosplit
    269 func goschedguarded() {
    270 	mcall(goschedguarded_m)
    271 }
    272 
    273 // Puts the current goroutine into a waiting state and calls unlockf.
    274 // If unlockf returns false, the goroutine is resumed.
    275 // unlockf must not access this G's stack, as it may be moved between
    276 // the call to gopark and the call to unlockf.
    277 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
    278 	mp := acquirem()
    279 	gp := mp.curg
    280 	status := readgstatus(gp)
    281 	if status != _Grunning && status != _Gscanrunning {
    282 		throw("gopark: bad g status")
    283 	}
    284 	mp.waitlock = lock
    285 	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
    286 	gp.waitreason = reason
    287 	mp.waittraceev = traceEv
    288 	mp.waittraceskip = traceskip
    289 	releasem(mp)
    290 	// can't do anything that might move the G between Ms here.
    291 	mcall(park_m)
    292 }
    293 
    294 // Puts the current goroutine into a waiting state and unlocks the lock.
    295 // The goroutine can be made runnable again by calling goready(gp).
    296 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
    297 	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
    298 }
    299 
    300 func goready(gp *g, traceskip int) {
    301 	systemstack(func() {
    302 		ready(gp, traceskip, true)
    303 	})
    304 }
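
        // Illustrative pairing of the park/ready primitives above (a hedged sketch,
        // not code from this file; l and the waiter bookkeeping are placeholders):
        //
        //	// Waiter: record this g somewhere the waker can find it, then park.
        //	lock(&l)
        //	// ... publish getg() in a structure protected by l ...
        //	goparkunlock(&l, "example wait", traceEvGoBlock, 1)
        //
        //	// Waker: once the wait condition is satisfied, make the recorded
        //	// goroutine runnable again.
        //	goready(gp, 1)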
    305 
    306 //go:nosplit
    307 func acquireSudog() *sudog {
    308 	// Delicate dance: the semaphore implementation calls
    309 	// acquireSudog, acquireSudog calls new(sudog),
    310 	// new calls malloc, malloc can call the garbage collector,
    311 	// and the garbage collector calls the semaphore implementation
    312 	// in stopTheWorld.
    313 	// Break the cycle by doing acquirem/releasem around new(sudog).
    314 	// The acquirem/releasem increments m.locks during new(sudog),
    315 	// which keeps the garbage collector from being invoked.
    316 	mp := acquirem()
    317 	pp := mp.p.ptr()
    318 	if len(pp.sudogcache) == 0 {
    319 		lock(&sched.sudoglock)
    320 		// First, try to grab a batch from central cache.
    321 		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
    322 			s := sched.sudogcache
    323 			sched.sudogcache = s.next
    324 			s.next = nil
    325 			pp.sudogcache = append(pp.sudogcache, s)
    326 		}
    327 		unlock(&sched.sudoglock)
    328 		// If the central cache is empty, allocate a new one.
    329 		if len(pp.sudogcache) == 0 {
    330 			pp.sudogcache = append(pp.sudogcache, new(sudog))
    331 		}
    332 	}
    333 	n := len(pp.sudogcache)
    334 	s := pp.sudogcache[n-1]
    335 	pp.sudogcache[n-1] = nil
    336 	pp.sudogcache = pp.sudogcache[:n-1]
    337 	if s.elem != nil {
    338 		throw("acquireSudog: found s.elem != nil in cache")
    339 	}
    340 	releasem(mp)
    341 	return s
    342 }
    343 
    344 //go:nosplit
    345 func releaseSudog(s *sudog) {
    346 	if s.elem != nil {
    347 		throw("runtime: sudog with non-nil elem")
    348 	}
    349 	if s.isSelect {
    350 		throw("runtime: sudog with non-false isSelect")
    351 	}
    352 	if s.next != nil {
    353 		throw("runtime: sudog with non-nil next")
    354 	}
    355 	if s.prev != nil {
    356 		throw("runtime: sudog with non-nil prev")
    357 	}
    358 	if s.waitlink != nil {
    359 		throw("runtime: sudog with non-nil waitlink")
    360 	}
    361 	if s.c != nil {
    362 		throw("runtime: sudog with non-nil c")
    363 	}
    364 	gp := getg()
    365 	if gp.param != nil {
    366 		throw("runtime: releaseSudog with non-nil gp.param")
    367 	}
    368 	mp := acquirem() // avoid rescheduling to another P
    369 	pp := mp.p.ptr()
    370 	if len(pp.sudogcache) == cap(pp.sudogcache) {
    371 		// Transfer half of local cache to the central cache.
    372 		var first, last *sudog
    373 		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
    374 			n := len(pp.sudogcache)
    375 			p := pp.sudogcache[n-1]
    376 			pp.sudogcache[n-1] = nil
    377 			pp.sudogcache = pp.sudogcache[:n-1]
    378 			if first == nil {
    379 				first = p
    380 			} else {
    381 				last.next = p
    382 			}
    383 			last = p
    384 		}
    385 		lock(&sched.sudoglock)
    386 		last.next = sched.sudogcache
    387 		sched.sudogcache = first
    388 		unlock(&sched.sudoglock)
    389 	}
    390 	pp.sudogcache = append(pp.sudogcache, s)
    391 	releasem(mp)
    392 }
    393 
    394 // funcPC returns the entry PC of the function f.
    395 // It assumes that f is a func value. Otherwise the behavior is undefined.
    396 //go:nosplit
    397 func funcPC(f interface{}) uintptr {
    398 	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
    399 }
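
        // funcPC works because f arrives boxed in an interface{}: the interface's
        // data word points at the func value (a *funcval), whose first word is the
        // entry PC. Abridged sketch of the layouts it relies on (illustrative only):
        //
        //	type eface struct {          // interface{} header
        //		_type *_type
        //		data  unsafe.Pointer // -> funcval
        //	}
        //	type funcval struct {
        //		fn uintptr           // entry PC (fn-specific data may follow)
        //	}
        //
        //	// funcPC(f) == *(*uintptr)(data word of f)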
    400 
    401 // called from assembly
    402 func badmcall(fn func(*g)) {
    403 	throw("runtime: mcall called on m->g0 stack")
    404 }
    405 
    406 func badmcall2(fn func(*g)) {
    407 	throw("runtime: mcall function returned")
    408 }
    409 
    410 func badreflectcall() {
    411 	panic(plainError("arg size to reflect.call more than 1GB"))
    412 }
    413 
    414 var badmorestackg0Msg = "fatal: morestack on g0\n"
    415 
    416 //go:nosplit
    417 //go:nowritebarrierrec
    418 func badmorestackg0() {
    419 	sp := stringStructOf(&badmorestackg0Msg)
    420 	write(2, sp.str, int32(sp.len))
    421 }
    422 
    423 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
    424 
    425 //go:nosplit
    426 //go:nowritebarrierrec
    427 func badmorestackgsignal() {
    428 	sp := stringStructOf(&badmorestackgsignalMsg)
    429 	write(2, sp.str, int32(sp.len))
    430 }
    431 
    432 //go:nosplit
    433 func badctxt() {
    434 	throw("ctxt != 0")
    435 }
    436 
    437 func lockedOSThread() bool {
    438 	gp := getg()
    439 	return gp.lockedm != 0 && gp.m.lockedg != 0
    440 }
    441 
    442 var (
    443 	allgs    []*g
    444 	allglock mutex
    445 )
    446 
    447 func allgadd(gp *g) {
    448 	if readgstatus(gp) == _Gidle {
    449 		throw("allgadd: bad status Gidle")
    450 	}
    451 
    452 	lock(&allglock)
    453 	allgs = append(allgs, gp)
    454 	allglen = uintptr(len(allgs))
    455 	unlock(&allglock)
    456 }
    457 
    458 const (
    459 	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
    460 	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
    461 	_GoidCacheBatch = 16
    462 )
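
        // Roughly how the cache is consumed when creating a goroutine (an
        // illustrative sketch of the refill done in newproc1, later in this file):
        //
        //	if _p_.goidcache == _p_.goidcacheend {
        //		// sched.goidgen is the last allocated id; grab the next batch.
        //		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
        //		_p_.goidcache -= _GoidCacheBatch - 1
        //		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
        //	}
        //	newg.goid = int64(_p_.goidcache)
        //	_p_.goidcache++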
    463 
    464 // The bootstrap sequence is:
    465 //
    466 //	call osinit
    467 //	call schedinit
    468 //	make & queue new G
    469 //	call runtime·mstart
    470 //
    471 // The new G calls runtime·main.
    472 func schedinit() {
    473 	// raceinit must be the first call to race detector.
    474 	// In particular, it must be done before mallocinit below calls racemapshadow.
    475 	_g_ := getg()
    476 	if raceenabled {
    477 		_g_.racectx, raceprocctx0 = raceinit()
    478 	}
    479 
    480 	sched.maxmcount = 10000
    481 
    482 	tracebackinit()
    483 	moduledataverify()
    484 	stackinit()
    485 	mallocinit()
    486 	mcommoninit(_g_.m)
    487 	alginit()       // maps must not be used before this call
    488 	modulesinit()   // provides activeModules
    489 	typelinksinit() // uses maps, activeModules
    490 	itabsinit()     // uses activeModules
    491 
    492 	msigsave(_g_.m)
    493 	initSigmask = _g_.m.sigmask
    494 
    495 	goargs()
    496 	goenvs()
    497 	parsedebugvars()
    498 	gcinit()
    499 
    500 	sched.lastpoll = uint64(nanotime())
    501 	procs := ncpu
    502 	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
    503 		procs = n
    504 	}
    505 	if procresize(procs) != nil {
    506 		throw("unknown runnable goroutine during bootstrap")
    507 	}
    508 
    509 	// For cgocheck > 1, we turn on the write barrier at all times
    510 	// and check all pointer writes. We can't do this until after
    511 	// procresize because the write barrier needs a P.
    512 	if debug.cgocheck > 1 {
    513 		writeBarrier.cgo = true
    514 		writeBarrier.enabled = true
    515 		for _, p := range allp {
    516 			p.wbBuf.reset()
    517 		}
    518 	}
    519 
    520 	if buildVersion == "" {
    521 		// Condition should never trigger. This code just serves
    522 // to ensure runtime·buildVersion is kept in the resulting binary.
    523 		buildVersion = "unknown"
    524 	}
    525 }
    526 
    527 func dumpgstatus(gp *g) {
    528 	_g_ := getg()
    529 	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
    530 	print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
    531 }
    532 
    533 func checkmcount() {
    534 	// sched lock is held
    535 	if mcount() > sched.maxmcount {
    536 		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
    537 		throw("thread exhaustion")
    538 	}
    539 }
    540 
    541 func mcommoninit(mp *m) {
    542 	_g_ := getg()
    543 
    544 // g0 stack won't make sense for user (and is not necessarily unwindable).
    545 	if _g_ != _g_.m.g0 {
    546 		callers(1, mp.createstack[:])
    547 	}
    548 
    549 	lock(&sched.lock)
    550 	if sched.mnext+1 < sched.mnext {
    551 		throw("runtime: thread ID overflow")
    552 	}
    553 	mp.id = sched.mnext
    554 	sched.mnext++
    555 	checkmcount()
    556 
    557 	mp.fastrand[0] = 1597334677 * uint32(mp.id)
    558 	mp.fastrand[1] = uint32(cputicks())
    559 	if mp.fastrand[0]|mp.fastrand[1] == 0 {
    560 		mp.fastrand[1] = 1
    561 	}
    562 
    563 	mpreinit(mp)
    564 	if mp.gsignal != nil {
    565 		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
    566 	}
    567 
    568 	// Add to allm so garbage collector doesn't free g->m
    569 	// when it is just in a register or thread-local storage.
    570 	mp.alllink = allm
    571 
    572 	// NumCgoCall() iterates over allm w/o schedlock,
    573 	// so we need to publish it safely.
    574 	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
    575 	unlock(&sched.lock)
    576 
    577 	// Allocate memory to hold a cgo traceback if the cgo call crashes.
    578 	if iscgo || GOOS == "solaris" || GOOS == "windows" {
    579 		mp.cgoCallers = new(cgoCallers)
    580 	}
    581 }
    582 
    583 // Mark gp ready to run.
    584 func ready(gp *g, traceskip int, next bool) {
    585 	if trace.enabled {
    586 		traceGoUnpark(gp, traceskip)
    587 	}
    588 
    589 	status := readgstatus(gp)
    590 
    591 	// Mark runnable.
    592 	_g_ := getg()
    593 	_g_.m.locks++ // disable preemption because it can be holding p in a local var
    594 	if status&^_Gscan != _Gwaiting {
    595 		dumpgstatus(gp)
    596 		throw("bad g->status in ready")
    597 	}
    598 
    599 	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
    600 	casgstatus(gp, _Gwaiting, _Grunnable)
    601 	runqput(_g_.m.p.ptr(), gp, next)
    602 	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
    603 		wakep()
    604 	}
    605 	_g_.m.locks--
    606 	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
    607 		_g_.stackguard0 = stackPreempt
    608 	}
    609 }
    610 
    611 func gcprocs() int32 {
    612 	// Figure out how many CPUs to use during GC.
    613 	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
    614 	lock(&sched.lock)
    615 	n := gomaxprocs
    616 	if n > ncpu {
    617 		n = ncpu
    618 	}
    619 	if n > _MaxGcproc {
    620 		n = _MaxGcproc
    621 	}
    622 	if n > sched.nmidle+1 { // one M is currently running
    623 		n = sched.nmidle + 1
    624 	}
    625 	unlock(&sched.lock)
    626 	return n
    627 }
    628 
    629 func needaddgcproc() bool {
    630 	lock(&sched.lock)
    631 	n := gomaxprocs
    632 	if n > ncpu {
    633 		n = ncpu
    634 	}
    635 	if n > _MaxGcproc {
    636 		n = _MaxGcproc
    637 	}
    638 	n -= sched.nmidle + 1 // one M is currently running
    639 	unlock(&sched.lock)
    640 	return n > 0
    641 }
    642 
    643 func helpgc(nproc int32) {
    644 	_g_ := getg()
    645 	lock(&sched.lock)
    646 	pos := 0
    647 	for n := int32(1); n < nproc; n++ { // one M is currently running
    648 		if allp[pos].mcache == _g_.m.mcache {
    649 			pos++
    650 		}
    651 		mp := mget()
    652 		if mp == nil {
    653 			throw("gcprocs inconsistency")
    654 		}
    655 		mp.helpgc = n
    656 		mp.p.set(allp[pos])
    657 		mp.mcache = allp[pos].mcache
    658 		pos++
    659 		notewakeup(&mp.park)
    660 	}
    661 	unlock(&sched.lock)
    662 }
    663 
    664 // freezeStopWait is a large value that freezetheworld sets
    665 // sched.stopwait to in order to request that all Gs permanently stop.
    666 const freezeStopWait = 0x7fffffff
    667 
    668 // freezing is set to non-zero if the runtime is trying to freeze the
    669 // world.
    670 var freezing uint32
    671 
    672 // Similar to stopTheWorld but best-effort and can be called several times.
    673 // There is no reverse operation; it is used during crashing.
    674 // This function must not lock any mutexes.
    675 func freezetheworld() {
    676 	atomic.Store(&freezing, 1)
    677 	// stopwait and preemption requests can be lost
    678 	// due to races with concurrently executing threads,
    679 	// so try several times
    680 	for i := 0; i < 5; i++ {
    681 		// this should tell the scheduler to not start any new goroutines
    682 		sched.stopwait = freezeStopWait
    683 		atomic.Store(&sched.gcwaiting, 1)
    684 		// this should stop running goroutines
    685 		if !preemptall() {
    686 			break // no running goroutines
    687 		}
    688 		usleep(1000)
    689 	}
    690 	// to be sure
    691 	usleep(1000)
    692 	preemptall()
    693 	usleep(1000)
    694 }
    695 
    696 func isscanstatus(status uint32) bool {
    697 	if status == _Gscan {
    698 		throw("isscanstatus: Bad status Gscan")
    699 	}
    700 	return status&_Gscan == _Gscan
    701 }
    702 
    703 // All reads and writes of g's status go through readgstatus, casgstatus
    704 // castogscanstatus, casfrom_Gscanstatus.
    705 //go:nosplit
    706 func readgstatus(gp *g) uint32 {
    707 	return atomic.Load(&gp.atomicstatus)
    708 }
    709 
    710 // Ownership of gcscanvalid:
    711 //
    712 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
    713 // then gp owns gp.gcscanvalid, and other goroutines must not modify it.
    714 //
    715 // Otherwise, a second goroutine can lock the scan state by setting _Gscan
    716 // in the status bit and then modify gcscanvalid, and then unlock the scan state.
    717 //
    718 // Note that the first condition implies an exception to the second:
    719 // if a second goroutine changes gp's status to _Grunning|_Gscan,
    720 // that second goroutine still does not have the right to modify gcscanvalid.
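        //
        // The locking pattern this implies (an illustrative sketch using
        // castogscanstatus and casfrom_Gscanstatus, defined below):
        //
        //	if castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
        //		// Scan state is locked; it is now safe to modify gcscanvalid.
        //		gp.gcscanvalid = false
        //		casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
        //	}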
    721 
    722 // The Gscanstatuses are acting like locks and this releases them.
    723 // If it proves to be a performance hit we should be able to make these
    724 // simple atomic stores but for now we are going to throw if
    725 // we see an inconsistent state.
    726 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
    727 	success := false
    728 
    729 	// Check that transition is valid.
    730 	switch oldval {
    731 	default:
    732 		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
    733 		dumpgstatus(gp)
    734 		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
    735 	case _Gscanrunnable,
    736 		_Gscanwaiting,
    737 		_Gscanrunning,
    738 		_Gscansyscall:
    739 		if newval == oldval&^_Gscan {
    740 			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
    741 		}
    742 	}
    743 	if !success {
    744 		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
    745 		dumpgstatus(gp)
    746 		throw("casfrom_Gscanstatus: gp->status is not in scan state")
    747 	}
    748 }
    749 
    750 // This will return false if the gp is not in the expected status and the cas fails.
    751 // This acts like a lock acquire while casfrom_Gscanstatus acts like a lock release.
    752 func castogscanstatus(gp *g, oldval, newval uint32) bool {
    753 	switch oldval {
    754 	case _Grunnable,
    755 		_Grunning,
    756 		_Gwaiting,
    757 		_Gsyscall:
    758 		if newval == oldval|_Gscan {
    759 			return atomic.Cas(&gp.atomicstatus, oldval, newval)
    760 		}
    761 	}
    762 	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
    763 	throw("castogscanstatus")
    764 	panic("not reached")
    765 }
    766 
    767 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
    768 // and casfrom_Gscanstatus instead.
    769 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
    770 // put it in the Gscan state is finished.
    771 //go:nosplit
    772 func casgstatus(gp *g, oldval, newval uint32) {
    773 	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
    774 		systemstack(func() {
    775 			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
    776 			throw("casgstatus: bad incoming values")
    777 		})
    778 	}
    779 
    780 	if oldval == _Grunning && gp.gcscanvalid {
    781 		// If oldval == _Grunning, then the actual status must be
    782 		// _Grunning or _Grunning|_Gscan; either way,
    783 		// we own gp.gcscanvalid, so it's safe to read.
    784 		// gp.gcscanvalid must not be true when we are running.
    785 		systemstack(func() {
    786 			print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
    787 			throw("casgstatus")
    788 		})
    789 	}
    790 
    791 	// See http://golang.org/cl/21503 for justification of the yield delay.
    792 	const yieldDelay = 5 * 1000
    793 	var nextYield int64
    794 
    795 	// loop if gp->atomicstatus is in a scan state giving
    796 	// GC time to finish and change the state to oldval.
    797 	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
    798 		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
    799 			systemstack(func() {
    800 				throw("casgstatus: waiting for Gwaiting but is Grunnable")
    801 			})
    802 		}
    803 		// Help GC if needed.
    804 		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
    805 		// 	gp.preemptscan = false
    806 		// 	systemstack(func() {
    807 		// 		gcphasework(gp)
    808 		// 	})
    809 		// }
    810 		// But meanwhile just yield.
    811 		if i == 0 {
    812 			nextYield = nanotime() + yieldDelay
    813 		}
    814 		if nanotime() < nextYield {
    815 			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
    816 				procyield(1)
    817 			}
    818 		} else {
    819 			osyield()
    820 			nextYield = nanotime() + yieldDelay/2
    821 		}
    822 	}
    823 	if newval == _Grunning {
    824 		gp.gcscanvalid = false
    825 	}
    826 }
    827 
    828 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
    829 // Returns old status. Cannot call casgstatus directly, because we are racing with an
    830 // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
    831 // it might have become Grunnable by the time we get to the cas. If we called casgstatus,
    832 // it would loop waiting for the status to go back to Gwaiting, which it never will.
    833 //go:nosplit
    834 func casgcopystack(gp *g) uint32 {
    835 	for {
    836 		oldstatus := readgstatus(gp) &^ _Gscan
    837 		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
    838 			throw("copystack: bad status, not Gwaiting or Grunnable")
    839 		}
    840 		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
    841 			return oldstatus
    842 		}
    843 	}
    844 }
    845 
    846 // scang blocks until gp's stack has been scanned.
    847 // It might be scanned by scang or it might be scanned by the goroutine itself.
    848 // Either way, the stack scan has completed when scang returns.
    849 func scang(gp *g, gcw *gcWork) {
    850 	// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
    851 	// Nothing is racing with us now, but gcscandone might be set to true left over
    852 	// from an earlier round of stack scanning (we scan twice per GC).
    853 	// We use gcscandone to record whether the scan has been done during this round.
    854 
    855 	gp.gcscandone = false
    856 
    857 	// See http://golang.org/cl/21503 for justification of the yield delay.
    858 	const yieldDelay = 10 * 1000
    859 	var nextYield int64
    860 
    861 	// Endeavor to get gcscandone set to true,
    862 	// either by doing the stack scan ourselves or by coercing gp to scan itself.
    863 	// gp.gcscandone can transition from false to true when we're not looking
    864 	// (if we asked for preemption), so any time we lock the status using
    865 	// castogscanstatus we have to double-check that the scan is still not done.
    866 loop:
    867 	for i := 0; !gp.gcscandone; i++ {
    868 		switch s := readgstatus(gp); s {
    869 		default:
    870 			dumpgstatus(gp)
    871 			throw("stopg: invalid status")
    872 
    873 		case _Gdead:
    874 			// No stack.
    875 			gp.gcscandone = true
    876 			break loop
    877 
    878 		case _Gcopystack:
    879 		// Stack being switched. Go around again.
    880 
    881 		case _Grunnable, _Gsyscall, _Gwaiting:
    882 			// Claim goroutine by setting scan bit.
    883 			// Racing with execution or readying of gp.
    884 			// The scan bit keeps them from running
    885 			// the goroutine until we're done.
    886 			if castogscanstatus(gp, s, s|_Gscan) {
    887 				if !gp.gcscandone {
    888 					scanstack(gp, gcw)
    889 					gp.gcscandone = true
    890 				}
    891 				restartg(gp)
    892 				break loop
    893 			}
    894 
    895 		case _Gscanwaiting:
    896 		// newstack is doing a scan for us right now. Wait.
    897 
    898 		case _Grunning:
    899 			// Goroutine running. Try to preempt execution so it can scan itself.
    900 			// The preemption handler (in newstack) does the actual scan.
    901 
    902 			// Optimization: if there is already a pending preemption request
    903 			// (from the previous loop iteration), don't bother with the atomics.
    904 			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
    905 				break
    906 			}
    907 
    908 			// Ask for preemption and self scan.
    909 			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
    910 				if !gp.gcscandone {
    911 					gp.preemptscan = true
    912 					gp.preempt = true
    913 					gp.stackguard0 = stackPreempt
    914 				}
    915 				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
    916 			}
    917 		}
    918 
    919 		if i == 0 {
    920 			nextYield = nanotime() + yieldDelay
    921 		}
    922 		if nanotime() < nextYield {
    923 			procyield(10)
    924 		} else {
    925 			osyield()
    926 			nextYield = nanotime() + yieldDelay/2
    927 		}
    928 	}
    929 
    930 	gp.preemptscan = false // cancel scan request if no longer needed
    931 }
    932 
    933 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
    934 func restartg(gp *g) {
    935 	s := readgstatus(gp)
    936 	switch s {
    937 	default:
    938 		dumpgstatus(gp)
    939 		throw("restartg: unexpected status")
    940 
    941 	case _Gdead:
    942 	// ok
    943 
    944 	case _Gscanrunnable,
    945 		_Gscanwaiting,
    946 		_Gscansyscall:
    947 		casfrom_Gscanstatus(gp, s, s&^_Gscan)
    948 	}
    949 }
    950 
    951 // stopTheWorld stops all P's from executing goroutines, interrupting
    952 // all goroutines at GC safe points, recording reason as the reason
    953 // for the stop. On return, only the current goroutine's P is running.
    954 // stopTheWorld must not be called from a system stack and the caller
    955 // must not hold worldsema. The caller must call startTheWorld when
    956 // other P's should resume execution.
    957 //
    958 // stopTheWorld is safe for multiple goroutines to call at the
    959 // same time. Each will execute its own stop, and the stops will
    960 // be serialized.
    961 //
    962 // This is also used by routines that do stack dumps. If the system is
    963 // in panic or being exited, this may not reliably stop all
    964 // goroutines.
    965 func stopTheWorld(reason string) {
    966 	semacquire(&worldsema)
    967 	getg().m.preemptoff = reason
    968 	systemstack(stopTheWorldWithSema)
    969 }
    970 
    971 // startTheWorld undoes the effects of stopTheWorld.
    972 func startTheWorld() {
    973 	systemstack(func() { startTheWorldWithSema(false) })
    974 	// worldsema must be held over startTheWorldWithSema to ensure
    975 	// gomaxprocs cannot change while worldsema is held.
    976 	semrelease(&worldsema)
    977 	getg().m.preemptoff = ""
    978 }
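
        // A typical caller brackets a brief global operation with the pair, e.g.
        // (an illustrative sketch, not code from this file):
        //
        //	stopTheWorld("example: update global state")
        //	// ... mutate state that must not race with running goroutines ...
        //	startTheWorld()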
    979 
    980 // Holding worldsema grants an M the right to try to stop the world
    981 // and prevents gomaxprocs from changing concurrently.
    982 var worldsema uint32 = 1
    983 
    984 // stopTheWorldWithSema is the core implementation of stopTheWorld.
    985 // The caller is responsible for acquiring worldsema and disabling
    986 // preemption first, and then should call stopTheWorldWithSema on the system
    987 // stack:
    988 //
    989 //	semacquire(&worldsema)
    990 //	m.preemptoff = "reason"
    991 //	systemstack(stopTheWorldWithSema)
    992 //
    993 // When finished, the caller must either call startTheWorld or undo
    994 // these three operations separately:
    995 //
    996 //	m.preemptoff = ""
    997 //	systemstack(func() { startTheWorldWithSema(false) })
    998 //	semrelease(&worldsema)
    999 //
   1000 // It is allowed to acquire worldsema once and then execute multiple
   1001 // startTheWorldWithSema/stopTheWorldWithSema pairs.
   1002 // Other P's are able to execute between successive calls to
   1003 // startTheWorldWithSema and stopTheWorldWithSema.
   1004 // Holding worldsema causes any other goroutines invoking
   1005 // stopTheWorld to block.
   1006 func stopTheWorldWithSema() {
   1007 	_g_ := getg()
   1008 
   1009 	// If we hold a lock, then we won't be able to stop another M
   1010 	// that is blocked trying to acquire the lock.
   1011 	if _g_.m.locks > 0 {
   1012 		throw("stopTheWorld: holding locks")
   1013 	}
   1014 
   1015 	lock(&sched.lock)
   1016 	sched.stopwait = gomaxprocs
   1017 	atomic.Store(&sched.gcwaiting, 1)
   1018 	preemptall()
   1019 	// stop current P
   1020 	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
   1021 	sched.stopwait--
   1022 	// try to retake all P's in Psyscall status
   1023 	for _, p := range allp {
   1024 		s := p.status
   1025 		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
   1026 			if trace.enabled {
   1027 				traceGoSysBlock(p)
   1028 				traceProcStop(p)
   1029 			}
   1030 			p.syscalltick++
   1031 			sched.stopwait--
   1032 		}
   1033 	}
   1034 	// stop idle P's
   1035 	for {
   1036 		p := pidleget()
   1037 		if p == nil {
   1038 			break
   1039 		}
   1040 		p.status = _Pgcstop
   1041 		sched.stopwait--
   1042 	}
   1043 	wait := sched.stopwait > 0
   1044 	unlock(&sched.lock)
   1045 
   1046 	// wait for remaining P's to stop voluntarily
   1047 	if wait {
   1048 		for {
   1049 			// wait for 100us, then try to re-preempt in case of any races
   1050 			if notetsleep(&sched.stopnote, 100*1000) {
   1051 				noteclear(&sched.stopnote)
   1052 				break
   1053 			}
   1054 			preemptall()
   1055 		}
   1056 	}
   1057 
   1058 	// sanity checks
   1059 	bad := ""
   1060 	if sched.stopwait != 0 {
   1061 		bad = "stopTheWorld: not stopped (stopwait != 0)"
   1062 	} else {
   1063 		for _, p := range allp {
   1064 			if p.status != _Pgcstop {
   1065 				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
   1066 			}
   1067 		}
   1068 	}
   1069 	if atomic.Load(&freezing) != 0 {
   1070 		// Some other thread is panicking. This can cause the
   1071 		// sanity checks above to fail if the panic happens in
   1072 		// the signal handler on a stopped thread. Either way,
   1073 		// we should halt this thread.
   1074 		lock(&deadlock)
   1075 		lock(&deadlock)
   1076 	}
   1077 	if bad != "" {
   1078 		throw(bad)
   1079 	}
   1080 }
   1081 
   1082 func mhelpgc() {
   1083 	_g_ := getg()
   1084 	_g_.m.helpgc = -1
   1085 }
   1086 
   1087 func startTheWorldWithSema(emitTraceEvent bool) int64 {
   1088 	_g_ := getg()
   1089 
   1090 	_g_.m.locks++ // disable preemption because it can be holding p in a local var
   1091 	if netpollinited() {
   1092 		gp := netpoll(false) // non-blocking
   1093 		injectglist(gp)
   1094 	}
   1095 	add := needaddgcproc()
   1096 	lock(&sched.lock)
   1097 
   1098 	procs := gomaxprocs
   1099 	if newprocs != 0 {
   1100 		procs = newprocs
   1101 		newprocs = 0
   1102 	}
   1103 	p1 := procresize(procs)
   1104 	sched.gcwaiting = 0
   1105 	if sched.sysmonwait != 0 {
   1106 		sched.sysmonwait = 0
   1107 		notewakeup(&sched.sysmonnote)
   1108 	}
   1109 	unlock(&sched.lock)
   1110 
   1111 	for p1 != nil {
   1112 		p := p1
   1113 		p1 = p1.link.ptr()
   1114 		if p.m != 0 {
   1115 			mp := p.m.ptr()
   1116 			p.m = 0
   1117 			if mp.nextp != 0 {
   1118 				throw("startTheWorld: inconsistent mp->nextp")
   1119 			}
   1120 			mp.nextp.set(p)
   1121 			notewakeup(&mp.park)
   1122 		} else {
   1123 			// Start M to run P.  Do not start another M below.
   1124 			newm(nil, p)
   1125 			add = false
   1126 		}
   1127 	}
   1128 
   1129 	// Capture start-the-world time before doing clean-up tasks.
   1130 	startTime := nanotime()
   1131 	if emitTraceEvent {
   1132 		traceGCSTWDone()
   1133 	}
   1134 
   1135 	// Wake up an additional proc in case we have excess runnable goroutines
   1136 	// in local queues or in the global queue. If we don't, the proc will park itself.
   1137 	// If there is a lot of excess work, resetspinning will unpark additional procs as necessary.
   1138 	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
   1139 		wakep()
   1140 	}
   1141 
   1142 	if add {
   1143 		// If GC could have used another helper proc, start one now,
   1144 		// in the hope that it will be available next time.
   1145 		// It would have been even better to start it before the collection,
   1146 		// but doing so requires allocating memory, so it's tricky to
   1147 		// coordinate. This lazy approach works out in practice:
   1148 		// we don't mind if the first couple gc rounds don't have quite
   1149 		// the maximum number of procs.
   1150 		newm(mhelpgc, nil)
   1151 	}
   1152 	_g_.m.locks--
   1153 	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   1154 		_g_.stackguard0 = stackPreempt
   1155 	}
   1156 
   1157 	return startTime
   1158 }
   1159 
   1160 // Called to start an M.
   1161 //
   1162 // This must not split the stack because we may not even have stack
   1163 // bounds set up yet.
   1164 //
   1165 // May run during STW (because it doesn't have a P yet), so write
   1166 // barriers are not allowed.
   1167 //
   1168 //go:nosplit
   1169 //go:nowritebarrierrec
   1170 func mstart() {
   1171 	_g_ := getg()
   1172 
   1173 	osStack := _g_.stack.lo == 0
   1174 	if osStack {
   1175 		// Initialize stack bounds from system stack.
   1176 		// Cgo may have left stack size in stack.hi.
   1177 		size := _g_.stack.hi
   1178 		if size == 0 {
   1179 			size = 8192 * sys.StackGuardMultiplier
   1180 		}
   1181 		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
   1182 		_g_.stack.lo = _g_.stack.hi - size + 1024
   1183 	}
   1184 	// Initialize stack guards so that we can start calling
   1185 	// both Go and C functions with stack growth prologues.
   1186 	_g_.stackguard0 = _g_.stack.lo + _StackGuard
   1187 	_g_.stackguard1 = _g_.stackguard0
   1188 	mstart1(0)
   1189 
   1190 	// Exit this thread.
   1191 	if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" {
   1192 		// Windows, Solaris and Plan 9 always system-allocate
   1193 		// the stack, but put it in _g_.stack before mstart,
   1194 		// so the logic above hasn't set osStack yet.
   1195 		osStack = true
   1196 	}
   1197 	mexit(osStack)
   1198 }
   1199 
   1200 func mstart1(dummy int32) {
   1201 	_g_ := getg()
   1202 
   1203 	if _g_ != _g_.m.g0 {
   1204 		throw("bad runtime·mstart")
   1205 	}
   1206 
   1207 	// Record the caller for use as the top of stack in mcall and
   1208 	// for terminating the thread.
   1209 	// We're never coming back to mstart1 after we call schedule,
   1210 	// so other calls can reuse the current frame.
   1211 	save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
   1212 	asminit()
   1213 	minit()
   1214 
   1215 	// Install signal handlers; after minit so that minit can
   1216 	// prepare the thread to be able to handle the signals.
   1217 	if _g_.m == &m0 {
   1218 		mstartm0()
   1219 	}
   1220 
   1221 	if fn := _g_.m.mstartfn; fn != nil {
   1222 		fn()
   1223 	}
   1224 
   1225 	if _g_.m.helpgc != 0 {
   1226 		_g_.m.helpgc = 0
   1227 		stopm()
   1228 	} else if _g_.m != &m0 {
   1229 		acquirep(_g_.m.nextp.ptr())
   1230 		_g_.m.nextp = 0
   1231 	}
   1232 	schedule()
   1233 }
   1234 
   1235 // mstartm0 implements part of mstart1 that only runs on the m0.
   1236 //
   1237 // Write barriers are allowed here because we know the GC can't be
   1238 // running yet, so they'll be no-ops.
   1239 //
   1240 //go:yeswritebarrierrec
   1241 func mstartm0() {
   1242 	// Create an extra M for callbacks on threads not created by Go.
   1243 	if iscgo && !cgoHasExtraM {
   1244 		cgoHasExtraM = true
   1245 		newextram()
   1246 	}
   1247 	initsig(false)
   1248 }
   1249 
   1250 // mexit tears down and exits the current thread.
   1251 //
   1252 // Don't call this directly to exit the thread, since it must run at
   1253 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
   1254 // unwind the stack to the point that exits the thread.
   1255 //
   1256 // It is entered with m.p != nil, so write barriers are allowed. It
   1257 // will release the P before exiting.
   1258 //
   1259 //go:yeswritebarrierrec
   1260 func mexit(osStack bool) {
   1261 	g := getg()
   1262 	m := g.m
   1263 
   1264 	if m == &m0 {
   1265 		// This is the main thread. Just wedge it.
   1266 		//
   1267 		// On Linux, exiting the main thread puts the process
   1268 		// into a non-waitable zombie state. On Plan 9,
   1269 		// exiting the main thread unblocks wait even though
   1270 		// other threads are still running. On Solaris we can
   1271 		// neither exitThread nor return from mstart. Other
   1272 		// bad things probably happen on other platforms.
   1273 		//
   1274 		// We could try to clean up this M more before wedging
   1275 		// it, but that complicates signal handling.
   1276 		handoffp(releasep())
   1277 		lock(&sched.lock)
   1278 		sched.nmfreed++
   1279 		checkdead()
   1280 		unlock(&sched.lock)
   1281 		notesleep(&m.park)
   1282 		throw("locked m0 woke up")
   1283 	}
   1284 
   1285 	sigblock()
   1286 	unminit()
   1287 
   1288 	// Free the gsignal stack.
   1289 	if m.gsignal != nil {
   1290 		stackfree(m.gsignal.stack)
   1291 	}
   1292 
   1293 	// Remove m from allm.
   1294 	lock(&sched.lock)
   1295 	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
   1296 		if *pprev == m {
   1297 			*pprev = m.alllink
   1298 			goto found
   1299 		}
   1300 	}
   1301 	throw("m not found in allm")
   1302 found:
   1303 	if !osStack {
   1304 		// Delay reaping m until it's done with the stack.
   1305 		//
   1306 		// If this is using an OS stack, the OS will free it
   1307 		// so there's no need for reaping.
   1308 		atomic.Store(&m.freeWait, 1)
   1309 		// Put m on the free list, though it will not be reaped until
   1310 		// freeWait is 0. Note that the free list must not be linked
   1311 		// through alllink because some functions walk allm without
   1312 		// locking, so may be using alllink.
   1313 		m.freelink = sched.freem
   1314 		sched.freem = m
   1315 	}
   1316 	unlock(&sched.lock)
   1317 
   1318 	// Release the P.
   1319 	handoffp(releasep())
   1320 	// After this point we must not have write barriers.
   1321 
   1322 	// Invoke the deadlock detector. This must happen after
   1323 	// handoffp because it may have started a new M to take our
   1324 	// P's work.
   1325 	lock(&sched.lock)
   1326 	sched.nmfreed++
   1327 	checkdead()
   1328 	unlock(&sched.lock)
   1329 
   1330 	if osStack {
   1331 		// Return from mstart and let the system thread
   1332 		// library free the g0 stack and terminate the thread.
   1333 		return
   1334 	}
   1335 
   1336 	// mstart is the thread's entry point, so there's nothing to
   1337 	// return to. Exit the thread directly. exitThread will clear
   1338 	// m.freeWait when it's done with the stack and the m can be
   1339 	// reaped.
   1340 	exitThread(&m.freeWait)
   1341 }
   1342 
   1343 // forEachP calls fn(p) for every P p when p reaches a GC safe point.
   1344 // If a P is currently executing code, this will bring the P to a GC
   1345 // safe point and execute fn on that P. If the P is not executing code
   1346 // (it is idle or in a syscall), this will call fn(p) directly while
   1347 // preventing the P from exiting its state. This does not ensure that
   1348 // fn will run on every CPU executing Go code, but it acts as a global
   1349 // memory barrier. GC uses this as a "ragged barrier."
   1350 //
   1351 // The caller must hold worldsema.
   1352 //
   1353 //go:systemstack
   1354 func forEachP(fn func(*p)) {
   1355 	mp := acquirem()
   1356 	_p_ := getg().m.p.ptr()
   1357 
   1358 	lock(&sched.lock)
   1359 	if sched.safePointWait != 0 {
   1360 		throw("forEachP: sched.safePointWait != 0")
   1361 	}
   1362 	sched.safePointWait = gomaxprocs - 1
   1363 	sched.safePointFn = fn
   1364 
   1365 	// Ask all Ps to run the safe point function.
   1366 	for _, p := range allp {
   1367 		if p != _p_ {
   1368 			atomic.Store(&p.runSafePointFn, 1)
   1369 		}
   1370 	}
   1371 	preemptall()
   1372 
   1373 	// Any P entering _Pidle or _Psyscall from now on will observe
   1374 	// p.runSafePointFn == 1 and will call runSafePointFn when
   1375 	// changing its status to _Pidle/_Psyscall.
   1376 
   1377 	// Run safe point function for all idle Ps. sched.pidle will
   1378 	// not change because we hold sched.lock.
   1379 	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
   1380 		if atomic.Cas(&p.runSafePointFn, 1, 0) {
   1381 			fn(p)
   1382 			sched.safePointWait--
   1383 		}
   1384 	}
   1385 
   1386 	wait := sched.safePointWait > 0
   1387 	unlock(&sched.lock)
   1388 
   1389 	// Run fn for the current P.
   1390 	fn(_p_)
   1391 
   1392 	// Force Ps currently in _Psyscall into _Pidle and hand them
   1393 	// off to induce safe point function execution.
   1394 	for _, p := range allp {
   1395 		s := p.status
   1396 		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
   1397 			if trace.enabled {
   1398 				traceGoSysBlock(p)
   1399 				traceProcStop(p)
   1400 			}
   1401 			p.syscalltick++
   1402 			handoffp(p)
   1403 		}
   1404 	}
   1405 
   1406 	// Wait for remaining Ps to run fn.
   1407 	if wait {
   1408 		for {
   1409 			// Wait for 100us, then try to re-preempt in
   1410 			// case of any races.
   1411 			//
   1412 			// Requires system stack.
   1413 			if notetsleep(&sched.safePointNote, 100*1000) {
   1414 				noteclear(&sched.safePointNote)
   1415 				break
   1416 			}
   1417 			preemptall()
   1418 		}
   1419 	}
   1420 	if sched.safePointWait != 0 {
   1421 		throw("forEachP: not done")
   1422 	}
   1423 	for _, p := range allp {
   1424 		if p.runSafePointFn != 0 {
   1425 			throw("forEachP: P did not run fn")
   1426 		}
   1427 	}
   1428 
   1429 	lock(&sched.lock)
   1430 	sched.safePointFn = nil
   1431 	unlock(&sched.lock)
   1432 	releasem(mp)
   1433 }
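
        // An illustrative (hypothetical) use of forEachP as a ragged barrier,
        // flushing a per-P buffer on every P at a safe point:
        //
        //	systemstack(func() {
        //		forEachP(func(_p_ *p) {
        //			_p_.wbBuf.reset()
        //		})
        //	})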
   1434 
   1435 // runSafePointFn runs the safe point function, if any, for this P.
   1436 // This should be called like
   1437 //
   1438 //     if getg().m.p.runSafePointFn != 0 {
   1439 //         runSafePointFn()
   1440 //     }
   1441 //
   1442 // runSafePointFn must be checked on any transition in to _Pidle or
   1443 // _Psyscall to avoid a race where forEachP sees that the P is running
   1444 // just before the P goes into _Pidle/_Psyscall and neither forEachP
   1445 // nor the P run the safe-point function.
   1446 func runSafePointFn() {
   1447 	p := getg().m.p.ptr()
   1448 	// Resolve the race between forEachP running the safe-point
   1449 	// function on this P's behalf and this P running the
   1450 	// safe-point function directly.
   1451 	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
   1452 		return
   1453 	}
   1454 	sched.safePointFn(p)
   1455 	lock(&sched.lock)
   1456 	sched.safePointWait--
   1457 	if sched.safePointWait == 0 {
   1458 		notewakeup(&sched.safePointNote)
   1459 	}
   1460 	unlock(&sched.lock)
   1461 }
   1462 
   1463 // When running with cgo, we call _cgo_thread_start
   1464 // to start threads for us so that we can play nicely with
   1465 // foreign code.
   1466 var cgoThreadStart unsafe.Pointer
   1467 
   1468 type cgothreadstart struct {
   1469 	g   guintptr
   1470 	tls *uint64
   1471 	fn  unsafe.Pointer
   1472 }
   1473 
   1474 // Allocate a new m unassociated with any thread.
   1475 // Can use p for allocation context if needed.
   1476 // fn is recorded as the new m's m.mstartfn.
   1477 //
   1478 // This function is allowed to have write barriers even if the caller
   1479 // isn't because it borrows _p_.
   1480 //
   1481 //go:yeswritebarrierrec
   1482 func allocm(_p_ *p, fn func()) *m {
   1483 	_g_ := getg()
   1484 	_g_.m.locks++ // disable GC because it can be called from sysmon
   1485 	if _g_.m.p == 0 {
   1486 		acquirep(_p_) // temporarily borrow p for mallocs in this function
   1487 	}
   1488 
   1489 	// Release the free M list. We need to do this somewhere and
   1490 	// this may free up a stack we can use.
   1491 	if sched.freem != nil {
   1492 		lock(&sched.lock)
   1493 		var newList *m
   1494 		for freem := sched.freem; freem != nil; {
   1495 			if freem.freeWait != 0 {
   1496 				next := freem.freelink
   1497 				freem.freelink = newList
   1498 				newList = freem
   1499 				freem = next
   1500 				continue
   1501 			}
   1502 			stackfree(freem.g0.stack)
   1503 			freem = freem.freelink
   1504 		}
   1505 		sched.freem = newList
   1506 		unlock(&sched.lock)
   1507 	}
   1508 
   1509 	mp := new(m)
   1510 	mp.mstartfn = fn
   1511 	mcommoninit(mp)
   1512 
   1513 	// In case of cgo or Solaris, pthread_create will make us a stack.
   1514 	// Windows and Plan 9 will lay out the sched stack on the OS stack.
   1515 	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
   1516 		mp.g0 = malg(-1)
   1517 	} else {
   1518 		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
   1519 	}
   1520 	mp.g0.m = mp
   1521 
   1522 	if _p_ == _g_.m.p.ptr() {
   1523 		releasep()
   1524 	}
   1525 	_g_.m.locks--
   1526 	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   1527 		_g_.stackguard0 = stackPreempt
   1528 	}
   1529 
   1530 	return mp
   1531 }
   1532 
   1533 // needm is called when a cgo callback happens on a
   1534 // thread without an m (a thread not created by Go).
   1535 // In this case, needm is expected to find an m to use
   1536 // and return with m, g initialized correctly.
   1537 // Since m and g are not set now (likely nil, but see below)
   1538 // needm is limited in what routines it can call. In particular
   1539 // it can only call nosplit functions (textflag 7) and cannot
   1540 // do any scheduling that requires an m.
   1541 //
   1542 // In order to avoid needing heavy lifting here, we adopt
   1543 // the following strategy: there is a stack of available m's
   1544 // that can be stolen. Using compare-and-swap
   1545 // to pop from the stack has ABA races, so we simulate
   1546 // a lock by doing an exchange (via casp) to steal the stack
   1547 // head and replace the top pointer with MLOCKED (1).
   1548 // This serves as a simple spin lock that we can use even
   1549 // without an m. The thread that locks the stack in this way
   1550 // unlocks the stack by storing a valid stack head pointer.
   1551 //
   1552 // In order to make sure that there is always an m structure
   1553 // available to be stolen, we maintain the invariant that there
   1554 // is always one more than needed. At the beginning of the
   1555 // program (if cgo is in use) the list is seeded with a single m.
   1556 // If needm finds that it has taken the last m off the list, its job
   1557 // is - once it has installed its own m so that it can do things like
   1558 // allocate memory - to create a spare m and put it on the list.
   1559 //
   1560 // Each of these extra m's also has a g0 and a curg that are
   1561 // pressed into service as the scheduling stack and current
   1562 // goroutine for the duration of the cgo callback.
   1563 //
   1564 // When the callback is done with the m, it calls dropm to
   1565 // put the m back on the list.
   1566 //go:nosplit
   1567 func needm(x byte) {
   1568 	if iscgo && !cgoHasExtraM {
   1569 		// Can happen if C/C++ code calls Go from a global ctor.
   1570 		// Can not throw, because scheduler is not initialized yet.
   1571 		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
   1572 		exit(1)
   1573 	}
   1574 
   1575 	// Lock extra list, take head, unlock popped list.
   1576 	// nilokay=false is safe here because of the invariant above,
   1577 	// that the extra list always contains or will soon contain
   1578 	// at least one m.
   1579 	mp := lockextra(false)
   1580 
   1581 	// Set needextram when we've just emptied the list,
   1582 	// so that the eventual call into cgocallbackg will
   1583 	// allocate a new m for the extra list. We delay the
   1584 	// allocation until then so that it can be done
   1585 	// after exitsyscall makes sure it is okay to be
   1586 	// running at all (that is, there's no garbage collection
   1587 	// running right now).
   1588 	mp.needextram = mp.schedlink == 0
   1589 	extraMCount--
   1590 	unlockextra(mp.schedlink.ptr())
   1591 
   1592 	// Save and block signals before installing g.
   1593 	// Once g is installed, any incoming signals will try to execute,
   1594 	// but we won't have the sigaltstack settings and other data
   1595 	// set up appropriately until the end of minit, which will
   1596 	// unblock the signals. This is the same dance as when
   1597 	// starting a new m to run Go code via newosproc.
   1598 	msigsave(mp)
   1599 	sigblock()
   1600 
   1601 	// Install g (= m->g0) and set the stack bounds
   1602 	// to match the current stack. We don't actually know
   1603 	// how big the stack is, like we don't know how big any
   1604 	// scheduling stack is, but we assume there's at least 32 kB,
   1605 	// which is more than enough for us.
   1606 	setg(mp.g0)
   1607 	_g_ := getg()
   1608 	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
   1609 	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
   1610 	_g_.stackguard0 = _g_.stack.lo + _StackGuard
   1611 
   1612 	// Initialize this thread to use the m.
   1613 	asminit()
   1614 	minit()
   1615 
   1616 	// mp.curg is now a real goroutine.
   1617 	casgstatus(mp.curg, _Gdead, _Gsyscall)
   1618 	atomic.Xadd(&sched.ngsys, -1)
   1619 }
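
        // Illustrative sketch only (the authoritative sequence lives in the
        // cgocallback assembly and in cgocallbackg): a cgo callback arriving on a
        // non-Go thread is bracketed roughly as
        //
        //	needm(...)   // borrow an extra M, install g0, run minit
        //	             // ... cgocallbackg runs the Go callback on mp.curg ...
        //	dropm()      // return the M to the extra list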
   1620 
   1621 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
   1622 
   1623 // newextram allocates m's and puts them on the extra list.
   1624 // It is called with a working local m, so that it can do things
   1625 // like call schedlock and allocate.
   1626 func newextram() {
   1627 	c := atomic.Xchg(&extraMWaiters, 0)
   1628 	if c > 0 {
   1629 		for i := uint32(0); i < c; i++ {
   1630 			oneNewExtraM()
   1631 		}
   1632 	} else {
   1633 		// Make sure there is at least one extra M.
   1634 		mp := lockextra(true)
   1635 		unlockextra(mp)
   1636 		if mp == nil {
   1637 			oneNewExtraM()
   1638 		}
   1639 	}
   1640 }
   1641 
   1642 // oneNewExtraM allocates an m and puts it on the extra list.
   1643 func oneNewExtraM() {
   1644 	// Create extra goroutine locked to extra m.
   1645 	// The goroutine is the context in which the cgo callback will run.
   1646 	// The sched.pc will never be returned to, but setting it to
   1647 	// goexit makes clear to the traceback routines where
   1648 	// the goroutine stack ends.
   1649 	mp := allocm(nil, nil)
   1650 	gp := malg(4096)
   1651 	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
   1652 	gp.sched.sp = gp.stack.hi
   1653 	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
   1654 	gp.sched.lr = 0
   1655 	gp.sched.g = guintptr(unsafe.Pointer(gp))
   1656 	gp.syscallpc = gp.sched.pc
   1657 	gp.syscallsp = gp.sched.sp
   1658 	gp.stktopsp = gp.sched.sp
   1659 	gp.gcscanvalid = true
   1660 	gp.gcscandone = true
   1661 	// malg returns status as _Gidle. Change to _Gdead before
   1662 	// adding to allg where GC can see it. We use _Gdead to hide
   1663 	// this from tracebacks and stack scans since it isn't a
   1664 	// "real" goroutine until needm grabs it.
   1665 	casgstatus(gp, _Gidle, _Gdead)
   1666 	gp.m = mp
   1667 	mp.curg = gp
   1668 	mp.lockedInt++
   1669 	mp.lockedg.set(gp)
   1670 	gp.lockedm.set(mp)
   1671 	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
   1672 	if raceenabled {
   1673 		gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
   1674 	}
   1675 	// put on allg for garbage collector
   1676 	allgadd(gp)
   1677 
   1678 	// gp is now on the allg list, but we don't want it to be
   1679 	// counted by gcount. It would be more "proper" to increment
   1680 	// sched.ngfree, but that requires locking. Incrementing ngsys
   1681 	// has the same effect.
   1682 	atomic.Xadd(&sched.ngsys, +1)
   1683 
   1684 	// Add m to the extra list.
   1685 	mnext := lockextra(true)
   1686 	mp.schedlink.set(mnext)
   1687 	extraMCount++
   1688 	unlockextra(mp)
   1689 }
   1690 
   1691 // dropm is called when a cgo callback has called needm but is now
   1692 // done with the callback and is returning to the non-Go thread.
   1693 // It puts the current m back onto the extra list.
   1694 //
   1695 // The main expense here is the call to signalstack to release the
   1696 // m's signal stack, and then the call to needm on the next callback
   1697 // from this thread. It is tempting to try to save the m for next time,
   1698 // which would eliminate both these costs, but there might not be
   1699 // a next time: the current thread (which Go does not control) might exit.
   1700 // If we saved the m for that thread, there would be an m leak each time
   1701 // such a thread exited. Instead, we acquire and release an m on each
   1702 // call. These should typically not be scheduling operations, just a few
   1703 // atomics, so the cost should be small.
   1704 //
   1705 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
   1706 // variable using pthread_key_create. Unlike the pthread keys we already use
   1707 // on OS X, this dummy key would never be read by Go code. It would exist
   1708 // only so that we could register a thread-exit-time destructor.
   1709 // That destructor would put the m back onto the extra list.
   1710 // This is purely a performance optimization. The current version,
   1711 // in which dropm happens on each cgo call, is still correct too.
   1712 // We may have to keep the current version on systems with cgo
   1713 // but without pthreads, like Windows.
   1714 func dropm() {
   1715 	// Clear m and g, and return m to the extra list.
   1716 	// After the call to setg we can only call nosplit functions
   1717 	// with no pointer manipulation.
   1718 	mp := getg().m
   1719 
   1720 	// Return mp.curg to dead state.
   1721 	casgstatus(mp.curg, _Gsyscall, _Gdead)
   1722 	atomic.Xadd(&sched.ngsys, +1)
   1723 
   1724 	// Block signals before unminit.
   1725 	// Unminit unregisters the signal handling stack (but needs g on some systems).
   1726 	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
   1727 	// It's important not to try to handle a signal between those two steps.
   1728 	sigmask := mp.sigmask
   1729 	sigblock()
   1730 	unminit()
   1731 
   1732 	mnext := lockextra(true)
   1733 	extraMCount++
   1734 	mp.schedlink.set(mnext)
   1735 
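        	// Clear g before mp is published back onto the extra list below:
        	// once unlockextra runs, another thread's needm may claim and reuse mp.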
   1736 	setg(nil)
   1737 
   1738 	// Commit the release of mp.
   1739 	unlockextra(mp)
   1740 
   1741 	msigrestore(sigmask)
   1742 }
   1743 
   1744 // A helper function for EnsureDropM.
   1745 func getm() uintptr {
   1746 	return uintptr(unsafe.Pointer(getg().m))
   1747 }
   1748 
   1749 var extram uintptr
   1750 var extraMCount uint32 // Protected by lockextra
   1751 var extraMWaiters uint32
   1752 
   1753 // lockextra locks the extra list and returns the list head.
   1754 // The caller must unlock the list by storing a new list head
   1755 // to extram. If nilokay is true, then lockextra will
   1756 // return a nil list head if that's what it finds. If nilokay is false,
   1757 // lockextra will keep waiting until the list head is no longer nil.
   1758 //go:nosplit
   1759 func lockextra(nilokay bool) *m {
   1760 	const locked = 1
   1761 
   1762 	incr := false
   1763 	for {
   1764 		old := atomic.Loaduintptr(&extram)
   1765 		if old == locked {
   1766 			yield := osyield
   1767 			yield()
   1768 			continue
   1769 		}
   1770 		if old == 0 && !nilokay {
   1771 			if !incr {
   1772 				// Add 1 to the number of threads
   1773 				// waiting for an M.
   1774 				// This is cleared by newextram.
   1775 				atomic.Xadd(&extraMWaiters, 1)
   1776 				incr = true
   1777 			}
   1778 			usleep(1)
   1779 			continue
   1780 		}
   1781 		if atomic.Casuintptr(&extram, old, locked) {
   1782 			return (*m)(unsafe.Pointer(old))
   1783 		}
   1784 		yield := osyield
   1785 		yield()
   1786 		continue
   1787 	}
   1788 }
   1789 
   1790 //go:nosplit
   1791 func unlockextra(mp *m) {
   1792 	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
   1793 }
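
        // Illustrative (non-normative) use of the extra-M list lock, mirroring
        // what needm (pop) and dropm/oneNewExtraM (push) above do:
        //
        //	mp := lockextra(false)          // claim the head; spins while it is nil
        //	unlockextra(mp.schedlink.ptr()) // pop: publish the remainder as the new head
        //
        //	mnext := lockextra(true)        // push: claim the current head (nil is okay)
        //	mp.schedlink.set(mnext)
        //	unlockextra(mp)                 // publish mp as the new head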
   1794 
   1795 // execLock serializes exec and clone to avoid bugs or unspecified behaviour
   1796 // around exec'ing while creating/destroying threads.  See issue #19546.
   1797 var execLock rwmutex
   1798 
   1799 // newmHandoff contains a list of m structures that need new OS threads.
   1800 // This is used by newm in situations where newm itself can't safely
   1801 // start an OS thread.
   1802 var newmHandoff struct {
   1803 	lock mutex
   1804 
   1805 	// newm points to a list of M structures that need new OS
   1806 	// threads. The list is linked through m.schedlink.
   1807 	newm muintptr
   1808 
   1809 	// waiting indicates that wake needs to be notified when an m
   1810 	// is put on the list.
   1811 	waiting bool
   1812 	wake    note
   1813 
   1814 	// haveTemplateThread indicates that the templateThread has
   1815 	// been started. This is not protected by lock. Use cas to set
   1816 	// to 1.
   1817 	haveTemplateThread uint32
   1818 }
   1819 
   1820 // Create a new m. It will start off with a call to fn, or else the scheduler.
   1821 // fn needs to be static and not a heap allocated closure.
   1822 // May run with m.p==nil, so write barriers are not allowed.
   1823 //go:nowritebarrierrec
   1824 func newm(fn func(), _p_ *p) {
   1825 	mp := allocm(_p_, fn)
   1826 	mp.nextp.set(_p_)
   1827 	mp.sigmask = initSigmask
   1828 	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
   1829 		// We're on a locked M or a thread that may have been
   1830 		// started by C. The kernel state of this thread may
   1831 		// be strange (the user may have locked it for that
   1832 		// purpose). We don't want to clone that into another
   1833 		// thread. Instead, ask a known-good thread to create
   1834 		// the thread for us.
   1835 		//
   1836 		// This is disabled on Plan 9. See golang.org/issue/22227.
   1837 		//
   1838 		// TODO: This may be unnecessary on Windows, which
   1839 		// doesn't model thread creation off fork.
   1840 		lock(&newmHandoff.lock)
   1841 		if newmHandoff.haveTemplateThread == 0 {
   1842 			throw("on a locked thread with no template thread")
   1843 		}
   1844 		mp.schedlink = newmHandoff.newm
   1845 		newmHandoff.newm.set(mp)
   1846 		if newmHandoff.waiting {
   1847 			newmHandoff.waiting = false
   1848 			notewakeup(&newmHandoff.wake)
   1849 		}
   1850 		unlock(&newmHandoff.lock)
   1851 		return
   1852 	}
   1853 	newm1(mp)
   1854 }
   1855 
   1856 func newm1(mp *m) {
   1857 	if iscgo {
   1858 		var ts cgothreadstart
   1859 		if _cgo_thread_start == nil {
   1860 			throw("_cgo_thread_start missing")
   1861 		}
   1862 		ts.g.set(mp.g0)
   1863 		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
   1864 		ts.fn = unsafe.Pointer(funcPC(mstart))
   1865 		if msanenabled {
   1866 			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
   1867 		}
   1868 		execLock.rlock() // Prevent process clone.
   1869 		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
   1870 		execLock.runlock()
   1871 		return
   1872 	}
   1873 	execLock.rlock() // Prevent process clone.
   1874 	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
   1875 	execLock.runlock()
   1876 }
   1877 
   1878 // startTemplateThread starts the template thread if it is not already
   1879 // running.
   1880 //
   1881 // The calling thread must itself be in a known-good state.
   1882 func startTemplateThread() {
   1883 	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
   1884 		return
   1885 	}
   1886 	newm(templateThread, nil)
   1887 }
   1888 
   1889 // templateThread is a thread in a known-good state that exists solely
   1890 // to start new threads in known-good states when the calling thread
   1891 // may not be in a good state.
   1892 //
   1893 // Many programs never need this, so templateThread is started lazily
   1894 // when we first enter a state that might lead to running on a thread
   1895 // in an unknown state.
   1896 //
   1897 // templateThread runs on an M without a P, so it must not have write
   1898 // barriers.
   1899 //
   1900 //go:nowritebarrierrec
   1901 func templateThread() {
   1902 	lock(&sched.lock)
   1903 	sched.nmsys++
   1904 	checkdead()
   1905 	unlock(&sched.lock)
   1906 
   1907 	for {
   1908 		lock(&newmHandoff.lock)
   1909 		for newmHandoff.newm != 0 {
   1910 			newm := newmHandoff.newm.ptr()
   1911 			newmHandoff.newm = 0
   1912 			unlock(&newmHandoff.lock)
   1913 			for newm != nil {
   1914 				next := newm.schedlink.ptr()
   1915 				newm.schedlink = 0
   1916 				newm1(newm)
   1917 				newm = next
   1918 			}
   1919 			lock(&newmHandoff.lock)
   1920 		}
   1921 		newmHandoff.waiting = true
   1922 		noteclear(&newmHandoff.wake)
   1923 		unlock(&newmHandoff.lock)
   1924 		notesleep(&newmHandoff.wake)
   1925 	}
   1926 }
   1927 
   1928 // Stops execution of the current m until new work is available.
   1929 // Returns with acquired P.
   1930 func stopm() {
   1931 	_g_ := getg()
   1932 
   1933 	if _g_.m.locks != 0 {
   1934 		throw("stopm holding locks")
   1935 	}
   1936 	if _g_.m.p != 0 {
   1937 		throw("stopm holding p")
   1938 	}
   1939 	if _g_.m.spinning {
   1940 		throw("stopm spinning")
   1941 	}
   1942 
   1943 retry:
   1944 	lock(&sched.lock)
   1945 	mput(_g_.m)
   1946 	unlock(&sched.lock)
   1947 	notesleep(&_g_.m.park)
   1948 	noteclear(&_g_.m.park)
   1949 	if _g_.m.helpgc != 0 {
   1950 		// helpgc() set _g_.m.p and _g_.m.mcache, so we have a P.
   1951 		gchelper()
   1952 		// Undo the effects of helpgc().
   1953 		_g_.m.helpgc = 0
   1954 		_g_.m.mcache = nil
   1955 		_g_.m.p = 0
   1956 		goto retry
   1957 	}
   1958 	acquirep(_g_.m.nextp.ptr())
   1959 	_g_.m.nextp = 0
   1960 }
   1961 
   1962 func mspinning() {
   1963 	// startm's caller incremented nmspinning. Set the new M's spinning.
   1964 	getg().m.spinning = true
   1965 }
   1966 
   1967 // Schedules some M to run the p (creates an M if necessary).
   1968 // If p==nil, tries to get an idle P; if there are no idle P's it does nothing.
   1969 // May run with m.p==nil, so write barriers are not allowed.
   1970 // If spinning is set, the caller has incremented nmspinning and startm will
   1971 // either decrement nmspinning or set m.spinning in the newly started M.
   1972 //go:nowritebarrierrec
   1973 func startm(_p_ *p, spinning bool) {
   1974 	lock(&sched.lock)
   1975 	if _p_ == nil {
   1976 		_p_ = pidleget()
   1977 		if _p_ == nil {
   1978 			unlock(&sched.lock)
   1979 			if spinning {
   1980 				// The caller incremented nmspinning, but there are no idle Ps,
   1981 				// so it's okay to just undo the increment and give up.
   1982 				if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
   1983 					throw("startm: negative nmspinning")
   1984 				}
   1985 			}
   1986 			return
   1987 		}
   1988 	}
   1989 	mp := mget()
   1990 	unlock(&sched.lock)
   1991 	if mp == nil {
   1992 		var fn func()
   1993 		if spinning {
   1994 			// The caller incremented nmspinning, so set m.spinning in the new M.
   1995 			fn = mspinning
   1996 		}
   1997 		newm(fn, _p_)
   1998 		return
   1999 	}
   2000 	if mp.spinning {
   2001 		throw("startm: m is spinning")
   2002 	}
   2003 	if mp.nextp != 0 {
   2004 		throw("startm: m has p")
   2005 	}
   2006 	if spinning && !runqempty(_p_) {
   2007 		throw("startm: p has runnable gs")
   2008 	}
   2009 	// The caller incremented nmspinning, so set m.spinning in the new M.
   2010 	mp.spinning = spinning
   2011 	mp.nextp.set(_p_)
   2012 	notewakeup(&mp.park)
   2013 }
   2014 
   2015 // Hands off P from syscall or locked M.
   2016 // Always runs without a P, so write barriers are not allowed.
   2017 //go:nowritebarrierrec
   2018 func handoffp(_p_ *p) {
   2019 	// handoffp must start an M in any situation where
   2020 	// findrunnable would return a G to run on _p_.
   2021 
   2022 	// if it has local work, start it straight away
   2023 	if !runqempty(_p_) || sched.runqsize != 0 {
   2024 		startm(_p_, false)
   2025 		return
   2026 	}
   2027 	// if it has GC work, start it straight away
   2028 	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
   2029 		startm(_p_, false)
   2030 		return
   2031 	}
   2032 	// no local work, check that there are no spinning/idle M's,
   2033 	// otherwise our help is not required
   2034 	if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
   2035 		startm(_p_, true)
   2036 		return
   2037 	}
   2038 	lock(&sched.lock)
   2039 	if sched.gcwaiting != 0 {
   2040 		_p_.status = _Pgcstop
   2041 		sched.stopwait--
   2042 		if sched.stopwait == 0 {
   2043 			notewakeup(&sched.stopnote)
   2044 		}
   2045 		unlock(&sched.lock)
   2046 		return
   2047 	}
   2048 	if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
   2049 		sched.safePointFn(_p_)
   2050 		sched.safePointWait--
   2051 		if sched.safePointWait == 0 {
   2052 			notewakeup(&sched.safePointNote)
   2053 		}
   2054 	}
   2055 	if sched.runqsize != 0 {
   2056 		unlock(&sched.lock)
   2057 		startm(_p_, false)
   2058 		return
   2059 	}
   2060 	// If this is the last running P and nobody is polling network,
   2061 	// we need to wake up another M to poll the network.
   2062 	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
   2063 		unlock(&sched.lock)
   2064 		startm(_p_, false)
   2065 		return
   2066 	}
   2067 	pidleput(_p_)
   2068 	unlock(&sched.lock)
   2069 }
   2070 
   2071 // Tries to add one more P to execute G's.
   2072 // Called when a G is made runnable (newproc, ready).
   2073 func wakep() {
   2074 	// be conservative about spinning threads
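        	// The 0->1 CAS succeeds only when there are no spinning Ms, and it also
        	// keeps two racing wakep calls from both starting an extra spinning M.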
   2075 	if !atomic.Cas(&sched.nmspinning, 0, 1) {
   2076 		return
   2077 	}
   2078 	startm(nil, true)
   2079 }
   2080 
   2081 // Stops execution of the current m that is locked to a g until the g is runnable again.
   2082 // Returns with acquired P.
   2083 func stoplockedm() {
   2084 	_g_ := getg()
   2085 
   2086 	if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
   2087 		throw("stoplockedm: inconsistent locking")
   2088 	}
   2089 	if _g_.m.p != 0 {
   2090 		// Schedule another M to run this p.
   2091 		_p_ := releasep()
   2092 		handoffp(_p_)
   2093 	}
   2094 	incidlelocked(1)
   2095 	// Wait until another thread schedules lockedg again.
   2096 	notesleep(&_g_.m.park)
   2097 	noteclear(&_g_.m.park)
   2098 	status := readgstatus(_g_.m.lockedg.ptr())
   2099 	if status&^_Gscan != _Grunnable {
   2100 		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
   2101 		dumpgstatus(_g_)
   2102 		throw("stoplockedm: not runnable")
   2103 	}
   2104 	acquirep(_g_.m.nextp.ptr())
   2105 	_g_.m.nextp = 0
   2106 }
   2107 
   2108 // Schedules the locked m to run the locked gp.
   2109 // May run during STW, so write barriers are not allowed.
   2110 //go:nowritebarrierrec
   2111 func startlockedm(gp *g) {
   2112 	_g_ := getg()
   2113 
   2114 	mp := gp.lockedm.ptr()
   2115 	if mp == _g_.m {
   2116 		throw("startlockedm: locked to me")
   2117 	}
   2118 	if mp.nextp != 0 {
   2119 		throw("startlockedm: m has p")
   2120 	}
   2121 	// directly handoff current P to the locked m
   2122 	incidlelocked(-1)
   2123 	_p_ := releasep()
   2124 	mp.nextp.set(_p_)
   2125 	notewakeup(&mp.park)
   2126 	stopm()
   2127 }
   2128 
   2129 // Stops the current m for stopTheWorld.
   2130 // Returns when the world is restarted.
   2131 func gcstopm() {
   2132 	_g_ := getg()
   2133 
   2134 	if sched.gcwaiting == 0 {
   2135 		throw("gcstopm: not waiting for gc")
   2136 	}
   2137 	if _g_.m.spinning {
   2138 		_g_.m.spinning = false
   2139 		// OK to just drop nmspinning here,
   2140 		// startTheWorld will unpark threads as necessary.
   2141 		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
   2142 			throw("gcstopm: negative nmspinning")
   2143 		}
   2144 	}
   2145 	_p_ := releasep()
   2146 	lock(&sched.lock)
   2147 	_p_.status = _Pgcstop
   2148 	sched.stopwait--
   2149 	if sched.stopwait == 0 {
   2150 		notewakeup(&sched.stopnote)
   2151 	}
   2152 	unlock(&sched.lock)
   2153 	stopm()
   2154 }
   2155 
   2156 // Schedules gp to run on the current M.
   2157 // If inheritTime is true, gp inherits the remaining time in the
   2158 // current time slice. Otherwise, it starts a new time slice.
   2159 // Never returns.
   2160 //
   2161 // Write barriers are allowed because this is called immediately after
   2162 // acquiring a P in several places.
   2163 //
   2164 //go:yeswritebarrierrec
   2165 func execute(gp *g, inheritTime bool) {
   2166 	_g_ := getg()
   2167 
   2168 	casgstatus(gp, _Grunnable, _Grunning)
   2169 	gp.waitsince = 0
   2170 	gp.preempt = false
   2171 	gp.stackguard0 = gp.stack.lo + _StackGuard
   2172 	if !inheritTime {
   2173 		_g_.m.p.ptr().schedtick++
   2174 	}
   2175 	_g_.m.curg = gp
   2176 	gp.m = _g_.m
   2177 
   2178 	// Check whether the profiler needs to be turned on or off.
   2179 	hz := sched.profilehz
   2180 	if _g_.m.profilehz != hz {
   2181 		setThreadCPUProfiler(hz)
   2182 	}
   2183 
   2184 	if trace.enabled {
   2185 		// GoSysExit has to happen when we have a P, but before GoStart.
   2186 		// So we emit it here.
   2187 		if gp.syscallsp != 0 && gp.sysblocktraced {
   2188 			traceGoSysExit(gp.sysexitticks)
   2189 		}
   2190 		traceGoStart()
   2191 	}
   2192 
   2193 	gogo(&gp.sched)
   2194 }
   2195 
   2196 // Finds a runnable goroutine to execute.
   2197 // Tries to steal from other P's, get g from global queue, poll network.
   2198 func findrunnable() (gp *g, inheritTime bool) {
   2199 	_g_ := getg()
   2200 
   2201 	// The conditions here and in handoffp must agree: if
   2202 	// findrunnable would return a G to run, handoffp must start
   2203 	// an M.
   2204 
   2205 top:
   2206 	_p_ := _g_.m.p.ptr()
   2207 	if sched.gcwaiting != 0 {
   2208 		gcstopm()
   2209 		goto top
   2210 	}
   2211 	if _p_.runSafePointFn != 0 {
   2212 		runSafePointFn()
   2213 	}
   2214 	if fingwait && fingwake {
   2215 		if gp := wakefing(); gp != nil {
   2216 			ready(gp, 0, true)
   2217 		}
   2218 	}
   2219 	if *cgo_yield != nil {
   2220 		asmcgocall(*cgo_yield, nil)
   2221 	}
   2222 
   2223 	// local runq
   2224 	if gp, inheritTime := runqget(_p_); gp != nil {
   2225 		return gp, inheritTime
   2226 	}
   2227 
   2228 	// global runq
   2229 	if sched.runqsize != 0 {
   2230 		lock(&sched.lock)
   2231 		gp := globrunqget(_p_, 0)
   2232 		unlock(&sched.lock)
   2233 		if gp != nil {
   2234 			return gp, false
   2235 		}
   2236 	}
   2237 
   2238 	// Poll network.
   2239 	// This netpoll is only an optimization before we resort to stealing.
   2240 	// We can safely skip it if there are no waiters or a thread is blocked
   2241 	// in netpoll already. If there is any kind of logical race with that
   2242 	// blocked thread (e.g. it has already returned from netpoll, but does
   2243 	// not set lastpoll yet), this thread will do blocking netpoll below
   2244 	// anyway.
   2245 	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
   2246 		if gp := netpoll(false); gp != nil { // non-blocking
   2247 			// netpoll returns list of goroutines linked by schedlink.
   2248 			injectglist(gp.schedlink.ptr())
   2249 			casgstatus(gp, _Gwaiting, _Grunnable)
   2250 			if trace.enabled {
   2251 				traceGoUnpark(gp, 0)
   2252 			}
   2253 			return gp, false
   2254 		}
   2255 	}
   2256 
   2257 	// Steal work from other P's.
   2258 	procs := uint32(gomaxprocs)
   2259 	if atomic.Load(&sched.npidle) == procs-1 {
   2260 		// Either GOMAXPROCS=1 or everybody, except for us, is idle already.
   2261 		// New work can appear from returning syscall/cgocall, network or timers.
   2262 		// None of that submits to local run queues, so there is no point in stealing.
   2263 		goto stop
   2264 	}
   2265 	// If number of spinning M's >= number of busy P's, block.
   2266 	// This is necessary to prevent excessive CPU consumption
   2267 	// when GOMAXPROCS>>1 but the program parallelism is low.
   2268 	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
   2269 		goto stop
   2270 	}
   2271 	if !_g_.m.spinning {
   2272 		_g_.m.spinning = true
   2273 		atomic.Xadd(&sched.nmspinning, 1)
   2274 	}
   2275 	for i := 0; i < 4; i++ {
   2276 		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
   2277 			if sched.gcwaiting != 0 {
   2278 				goto top
   2279 			}
   2280 			stealRunNextG := i > 2 // first look for ready queues with more than 1 g
   2281 			if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
   2282 				return gp, false
   2283 			}
   2284 		}
   2285 	}
   2286 
   2287 stop:
   2288 
   2289 	// We have nothing to do. If we're in the GC mark phase, can
   2290 	// safely scan and blacken objects, and have work to do, run
   2291 	// idle-time marking rather than give up the P.
   2292 	if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
   2293 		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
   2294 		gp := _p_.gcBgMarkWorker.ptr()
   2295 		casgstatus(gp, _Gwaiting, _Grunnable)
   2296 		if trace.enabled {
   2297 			traceGoUnpark(gp, 0)
   2298 		}
   2299 		return gp, false
   2300 	}
   2301 
   2302 	// Before we drop our P, make a snapshot of the allp slice,
   2303 	// which can change underfoot once we no longer block
   2304 	// safe-points. We don't need to snapshot the contents because
   2305 	// everything up to cap(allp) is immutable.
   2306 	allpSnapshot := allp
   2307 
   2308 	// return P and block
   2309 	lock(&sched.lock)
   2310 	if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
   2311 		unlock(&sched.lock)
   2312 		goto top
   2313 	}
   2314 	if sched.runqsize != 0 {
   2315 		gp := globrunqget(_p_, 0)
   2316 		unlock(&sched.lock)
   2317 		return gp, false
   2318 	}
   2319 	if releasep() != _p_ {
   2320 		throw("findrunnable: wrong p")
   2321 	}
   2322 	pidleput(_p_)
   2323 	unlock(&sched.lock)
   2324 
   2325 	// Delicate dance: thread transitions from spinning to non-spinning state,
   2326 	// potentially concurrently with submission of new goroutines. We must
   2327 	// drop nmspinning first and then check all per-P queues again (with
   2328 	// #StoreLoad memory barrier in between). If we do it the other way around,
   2329 	// another thread can submit a goroutine after we've checked all run queues
   2330 	// but before we drop nmspinning; as the result nobody will unpark a thread
   2331 	// to run the goroutine.
   2332 	// If we discover new work below, we need to restore m.spinning as a signal
   2333 	// for resetspinning to unpark a new worker thread (because there can be more
   2334 	// than one starving goroutine). However, if after discovering new work
   2335 	// we also observe no idle Ps, it is OK to just park the current thread:
   2336 	// the system is fully loaded so no spinning threads are required.
   2337 	// Also see "Worker thread parking/unparking" comment at the top of the file.
   2338 	wasSpinning := _g_.m.spinning
   2339 	if _g_.m.spinning {
   2340 		_g_.m.spinning = false
   2341 		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
   2342 			throw("findrunnable: negative nmspinning")
   2343 		}
   2344 	}
   2345 
   2346 	// check all runqueues once again
   2347 	for _, _p_ := range allpSnapshot {
   2348 		if !runqempty(_p_) {
   2349 			lock(&sched.lock)
   2350 			_p_ = pidleget()
   2351 			unlock(&sched.lock)
   2352 			if _p_ != nil {
   2353 				acquirep(_p_)
   2354 				if wasSpinning {
   2355 					_g_.m.spinning = true
   2356 					atomic.Xadd(&sched.nmspinning, 1)
   2357 				}
   2358 				goto top
   2359 			}
   2360 			break
   2361 		}
   2362 	}
   2363 
   2364 	// Check for idle-priority GC work again.
   2365 	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
   2366 		lock(&sched.lock)
   2367 		_p_ = pidleget()
   2368 		if _p_ != nil && _p_.gcBgMarkWorker == 0 {
   2369 			pidleput(_p_)
   2370 			_p_ = nil
   2371 		}
   2372 		unlock(&sched.lock)
   2373 		if _p_ != nil {
   2374 			acquirep(_p_)
   2375 			if wasSpinning {
   2376 				_g_.m.spinning = true
   2377 				atomic.Xadd(&sched.nmspinning, 1)
   2378 			}
   2379 			// Go back to idle GC check.
   2380 			goto stop
   2381 		}
   2382 	}
   2383 
   2384 	// poll network
   2385 	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
   2386 		if _g_.m.p != 0 {
   2387 			throw("findrunnable: netpoll with p")
   2388 		}
   2389 		if _g_.m.spinning {
   2390 			throw("findrunnable: netpoll with spinning")
   2391 		}
   2392 		gp := netpoll(true) // block until new work is available
   2393 		atomic.Store64(&sched.lastpoll, uint64(nanotime()))
   2394 		if gp != nil {
   2395 			lock(&sched.lock)
   2396 			_p_ = pidleget()
   2397 			unlock(&sched.lock)
   2398 			if _p_ != nil {
   2399 				acquirep(_p_)
   2400 				injectglist(gp.schedlink.ptr())
   2401 				casgstatus(gp, _Gwaiting, _Grunnable)
   2402 				if trace.enabled {
   2403 					traceGoUnpark(gp, 0)
   2404 				}
   2405 				return gp, false
   2406 			}
   2407 			injectglist(gp)
   2408 		}
   2409 	}
   2410 	stopm()
   2411 	goto top
   2412 }
   2413 
   2414 // pollWork returns true if there is non-background work this P could
   2415 // be doing. This is a fairly lightweight check to be used for
   2416 // background work loops, like idle GC. It checks a subset of the
   2417 // conditions checked by the actual scheduler.
   2418 func pollWork() bool {
   2419 	if sched.runqsize != 0 {
   2420 		return true
   2421 	}
   2422 	p := getg().m.p.ptr()
   2423 	if !runqempty(p) {
   2424 		return true
   2425 	}
   2426 	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
   2427 		if gp := netpoll(false); gp != nil {
   2428 			injectglist(gp)
   2429 			return true
   2430 		}
   2431 	}
   2432 	return false
   2433 }
   2434 
   2435 func resetspinning() {
   2436 	_g_ := getg()
   2437 	if !_g_.m.spinning {
   2438 		throw("resetspinning: not a spinning m")
   2439 	}
   2440 	_g_.m.spinning = false
   2441 	nmspinning := atomic.Xadd(&sched.nmspinning, -1)
   2442 	if int32(nmspinning) < 0 {
   2443 		throw("findrunnable: negative nmspinning")
   2444 	}
   2445 	// M wakeup policy is deliberately somewhat conservative, so check if we
   2446 	// need to wakeup another P here. See "Worker thread parking/unparking"
   2447 	// comment at the top of the file for details.
   2448 	if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
   2449 		wakep()
   2450 	}
   2451 }
   2452 
   2453 // Injects the list of runnable G's into the scheduler.
   2454 // Can run concurrently with GC.
   2455 func injectglist(glist *g) {
   2456 	if glist == nil {
   2457 		return
   2458 	}
   2459 	if trace.enabled {
   2460 		for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
   2461 			traceGoUnpark(gp, 0)
   2462 		}
   2463 	}
   2464 	lock(&sched.lock)
   2465 	var n int
   2466 	for n = 0; glist != nil; n++ {
   2467 		gp := glist
   2468 		glist = gp.schedlink.ptr()
   2469 		casgstatus(gp, _Gwaiting, _Grunnable)
   2470 		globrunqput(gp)
   2471 	}
   2472 	unlock(&sched.lock)
   2473 	for ; n != 0 && sched.npidle != 0; n-- {
   2474 		startm(nil, false)
   2475 	}
   2476 }
   2477 
   2478 // One round of scheduler: find a runnable goroutine and execute it.
   2479 // Never returns.
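        //
        // schedule looks for work in roughly this order: a trace reader, a GC mark
        // worker, the global run queue (checked every 61st schedtick for fairness),
        // the local run queue, and finally findrunnable, which blocks until work is
        // available.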
   2480 func schedule() {
   2481 	_g_ := getg()
   2482 
   2483 	if _g_.m.locks != 0 {
   2484 		throw("schedule: holding locks")
   2485 	}
   2486 
   2487 	if _g_.m.lockedg != 0 {
   2488 		stoplockedm()
   2489 		execute(_g_.m.lockedg.ptr(), false) // Never returns.
   2490 	}
   2491 
   2492 	// We should not schedule away from a g that is executing a cgo call,
   2493 	// since the cgo call is using the m's g0 stack.
   2494 	if _g_.m.incgo {
   2495 		throw("schedule: in cgo")
   2496 	}
   2497 
   2498 top:
   2499 	if sched.gcwaiting != 0 {
   2500 		gcstopm()
   2501 		goto top
   2502 	}
   2503 	if _g_.m.p.ptr().runSafePointFn != 0 {
   2504 		runSafePointFn()
   2505 	}
   2506 
   2507 	var gp *g
   2508 	var inheritTime bool
   2509 	if trace.enabled || trace.shutdown {
   2510 		gp = traceReader()
   2511 		if gp != nil {
   2512 			casgstatus(gp, _Gwaiting, _Grunnable)
   2513 			traceGoUnpark(gp, 0)
   2514 		}
   2515 	}
   2516 	if gp == nil && gcBlackenEnabled != 0 {
   2517 		gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
   2518 	}
   2519 	if gp == nil {
   2520 		// Check the global runnable queue once in a while to ensure fairness.
   2521 		// Otherwise two goroutines can completely occupy the local runqueue
   2522 		// by constantly respawning each other.
   2523 		if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
   2524 			lock(&sched.lock)
   2525 			gp = globrunqget(_g_.m.p.ptr(), 1)
   2526 			unlock(&sched.lock)
   2527 		}
   2528 	}
   2529 	if gp == nil {
   2530 		gp, inheritTime = runqget(_g_.m.p.ptr())
   2531 		if gp != nil && _g_.m.spinning {
   2532 			throw("schedule: spinning with local work")
   2533 		}
   2534 	}
   2535 	if gp == nil {
   2536 		gp, inheritTime = findrunnable() // blocks until work is available
   2537 	}
   2538 
   2539 	// This thread is going to run a goroutine and is not spinning anymore,
   2540 	// so if it was marked as spinning we need to reset it now and potentially
   2541 	// start a new spinning M.
   2542 	if _g_.m.spinning {
   2543 		resetspinning()
   2544 	}
   2545 
   2546 	if gp.lockedm != 0 {
   2547 		// Hands off own p to the locked m,
   2548 		// then blocks waiting for a new p.
   2549 		startlockedm(gp)
   2550 		goto top
   2551 	}
   2552 
   2553 	execute(gp, inheritTime)
   2554 }
   2555 
   2556 // dropg removes the association between m and the current goroutine m->curg (gp for short).
   2557 // Typically a caller sets gp's status away from Grunning and then
   2558 // immediately calls dropg to finish the job. The caller is also responsible
   2559 // for arranging that gp will be restarted using ready at an
   2560 // appropriate time. After calling dropg and arranging for gp to be
   2561 // readied later, the caller can do other work but eventually should
   2562 // call schedule to restart the scheduling of goroutines on this m.
   2563 func dropg() {
   2564 	_g_ := getg()
   2565 
   2566 	setMNoWB(&_g_.m.curg.m, nil)
   2567 	setGNoWB(&_g_.m.curg, nil)
   2568 }
   2569 
   2570 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
   2571 	unlock((*mutex)(lock))
   2572 	return true
   2573 }
   2574 
   2575 // park continuation on g0.
   2576 func park_m(gp *g) {
   2577 	_g_ := getg()
   2578 
   2579 	if trace.enabled {
   2580 		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
   2581 	}
   2582 
   2583 	casgstatus(gp, _Grunning, _Gwaiting)
   2584 	dropg()
   2585 
   2586 	if _g_.m.waitunlockf != nil {
   2587 		fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
   2588 		ok := fn(gp, _g_.m.waitlock)
   2589 		_g_.m.waitunlockf = nil
   2590 		_g_.m.waitlock = nil
   2591 		if !ok {
   2592 			if trace.enabled {
   2593 				traceGoUnpark(gp, 2)
   2594 			}
   2595 			casgstatus(gp, _Gwaiting, _Grunnable)
   2596 			execute(gp, true) // Schedule it back, never returns.
   2597 		}
   2598 	}
   2599 	schedule()
   2600 }
   2601 
   2602 func goschedImpl(gp *g) {
   2603 	status := readgstatus(gp)
   2604 	if status&^_Gscan != _Grunning {
   2605 		dumpgstatus(gp)
   2606 		throw("bad g status")
   2607 	}
   2608 	casgstatus(gp, _Grunning, _Grunnable)
   2609 	dropg()
   2610 	lock(&sched.lock)
   2611 	globrunqput(gp)
   2612 	unlock(&sched.lock)
   2613 
   2614 	schedule()
   2615 }
   2616 
   2617 // Gosched continuation on g0.
   2618 func gosched_m(gp *g) {
   2619 	if trace.enabled {
   2620 		traceGoSched()
   2621 	}
   2622 	goschedImpl(gp)
   2623 }
   2624 
   2625 // goschedguarded is a forbidden-states-avoided version of gosched_m
   2626 func goschedguarded_m(gp *g) {
   2627 
   2628 	if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
   2629 		gogo(&gp.sched) // never return
   2630 	}
   2631 
   2632 	if trace.enabled {
   2633 		traceGoSched()
   2634 	}
   2635 	goschedImpl(gp)
   2636 }
   2637 
   2638 func gopreempt_m(gp *g) {
   2639 	if trace.enabled {
   2640 		traceGoPreempt()
   2641 	}
   2642 	goschedImpl(gp)
   2643 }
   2644 
   2645 // Finishes execution of the current goroutine.
   2646 func goexit1() {
   2647 	if raceenabled {
   2648 		racegoend()
   2649 	}
   2650 	if trace.enabled {
   2651 		traceGoEnd()
   2652 	}
   2653 	mcall(goexit0)
   2654 }
   2655 
   2656 // goexit continuation on g0.
   2657 func goexit0(gp *g) {
   2658 	_g_ := getg()
   2659 
   2660 	casgstatus(gp, _Grunning, _Gdead)
   2661 	if isSystemGoroutine(gp) {
   2662 		atomic.Xadd(&sched.ngsys, -1)
   2663 	}
   2664 	gp.m = nil
   2665 	locked := gp.lockedm != 0
   2666 	gp.lockedm = 0
   2667 	_g_.m.lockedg = 0
   2668 	gp.paniconfault = false
   2669 	gp._defer = nil // should be nil already, but just in case.
   2670 	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
   2671 	gp.writebuf = nil
   2672 	gp.waitreason = ""
   2673 	gp.param = nil
   2674 	gp.labels = nil
   2675 	gp.timer = nil
   2676 
   2677 	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
   2678 		// Flush assist credit to the global pool. This gives
   2679 		// better information to pacing if the application is
   2680 		// rapidly creating and exiting goroutines.
   2681 		scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
   2682 		atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
   2683 		gp.gcAssistBytes = 0
   2684 	}
   2685 
   2686 	// Note that gp's stack scan is now "valid" because it has no
   2687 	// stack.
   2688 	gp.gcscanvalid = true
   2689 	dropg()
   2690 
   2691 	if _g_.m.lockedInt != 0 {
   2692 		print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
   2693 		throw("internal lockOSThread error")
   2694 	}
   2695 	_g_.m.lockedExt = 0
   2696 	gfput(_g_.m.p.ptr(), gp)
   2697 	if locked {
   2698 		// The goroutine may have locked this thread because
   2699 		// it put it in an unusual kernel state. Kill it
   2700 		// rather than returning it to the thread pool.
   2701 
   2702 		// Return to mstart, which will release the P and exit
   2703 		// the thread.
   2704 		if GOOS != "plan9" { // See golang.org/issue/22227.
   2705 			gogo(&_g_.m.g0.sched)
   2706 		}
   2707 	}
   2708 	schedule()
   2709 }
   2710 
   2711 // save updates getg().sched to refer to pc and sp so that a following
   2712 // gogo will restore pc and sp.
   2713 //
   2714 // save must not have write barriers because invoking a write barrier
   2715 // can clobber getg().sched.
   2716 //
   2717 //go:nosplit
   2718 //go:nowritebarrierrec
   2719 func save(pc, sp uintptr) {
   2720 	_g_ := getg()
   2721 
   2722 	_g_.sched.pc = pc
   2723 	_g_.sched.sp = sp
   2724 	_g_.sched.lr = 0
   2725 	_g_.sched.ret = 0
   2726 	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
   2727 	// We need to ensure ctxt is zero, but can't have a write
   2728 	// barrier here. However, it should always already be zero.
   2729 	// Assert that.
   2730 	if _g_.sched.ctxt != nil {
   2731 		badctxt()
   2732 	}
   2733 }
   2734 
   2735 // The goroutine g is about to enter a system call.
   2736 // Record that it's not using the cpu anymore.
   2737 // This is called only from the go syscall library and cgocall,
   2738 // not from the low-level system calls used by the runtime.
   2739 //
   2740 // Entersyscall cannot split the stack: the gosave must
   2741 // make g->sched refer to the caller's stack segment, because
   2742 // entersyscall is going to return immediately after.
   2743 //
   2744 // Nothing entersyscall calls can split the stack either.
   2745 // We cannot safely move the stack during an active call to syscall,
   2746 // because we do not know which of the uintptr arguments are
   2747 // really pointers (back into the stack).
   2748 // In practice, this means that we make the fast path run through
   2749 // entersyscall doing no-split things, and the slow path has to use systemstack
   2750 // to run bigger things on the system stack.
   2751 //
   2752 // reentersyscall is the entry point used by cgo callbacks, where explicitly
   2753 // saved SP and PC are restored. This is needed when exitsyscall will be called
   2754 // from a function further up in the call stack than the parent, as g->syscallsp
   2755 // must always point to a valid stack frame. entersyscall below is the normal
   2756 // entry point for syscalls, which obtains the SP and PC from the caller.
   2757 //
   2758 // Syscall tracing:
   2759 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
   2760 // If the syscall does not block, that is it, we do not emit any other events.
   2761 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
   2762 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
   2763 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
   2764 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
   2765 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
   2766 // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
   2767 // and we wait for the increment before emitting traceGoSysExit.
   2768 // Note that the increment is done even if tracing is not enabled,
   2769 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
   2770 //
   2771 //go:nosplit
   2772 func reentersyscall(pc, sp uintptr) {
   2773 	_g_ := getg()
   2774 
   2775 	// Disable preemption because during this function g is in Gsyscall status,
   2776 	// but can have inconsistent g->sched, do not let GC observe it.
   2777 	_g_.m.locks++
   2778 
   2779 	// Entersyscall must not call any function that might split/grow the stack.
   2780 	// (See details in comment above.)
   2781 	// Catch calls that might, by replacing the stack guard with something that
   2782 	// will trip any stack check and leaving a flag to tell newstack to die.
   2783 	_g_.stackguard0 = stackPreempt
   2784 	_g_.throwsplit = true
   2785 
   2786 	// Leave SP around for GC and traceback.
   2787 	save(pc, sp)
   2788 	_g_.syscallsp = sp
   2789 	_g_.syscallpc = pc
   2790 	casgstatus(_g_, _Grunning, _Gsyscall)
   2791 	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
   2792 		systemstack(func() {
   2793 			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
   2794 			throw("entersyscall")
   2795 		})
   2796 	}
   2797 
   2798 	if trace.enabled {
   2799 		systemstack(traceGoSysCall)
   2800 		// systemstack itself clobbers g.sched.{pc,sp} and we might
   2801 		// need them later when the G is genuinely blocked in a
   2802 		// syscall
   2803 		save(pc, sp)
   2804 	}
   2805 
   2806 	if atomic.Load(&sched.sysmonwait) != 0 {
   2807 		systemstack(entersyscall_sysmon)
   2808 		save(pc, sp)
   2809 	}
   2810 
   2811 	if _g_.m.p.ptr().runSafePointFn != 0 {
   2812 		// runSafePointFn may stack split if run on this stack
   2813 		systemstack(runSafePointFn)
   2814 		save(pc, sp)
   2815 	}
   2816 
   2817 	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
   2818 	_g_.sysblocktraced = true
   2819 	_g_.m.mcache = nil
   2820 	_g_.m.p.ptr().m = 0
   2821 	atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
   2822 	if sched.gcwaiting != 0 {
   2823 		systemstack(entersyscall_gcwait)
   2824 		save(pc, sp)
   2825 	}
   2826 
   2827 	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
   2828 	// We set stackguard0 to stackPreempt so that the first split stack check calls morestack.
   2829 	// Morestack detects this case and throws.
   2830 	_g_.stackguard0 = stackPreempt
   2831 	_g_.m.locks--
   2832 }
   2833 
   2834 // Standard syscall entry used by the go syscall library and normal cgo calls.
   2835 //go:nosplit
   2836 func entersyscall(dummy int32) {
   2837 	reentersyscall(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
   2838 }
   2839 
   2840 func entersyscall_sysmon() {
   2841 	lock(&sched.lock)
   2842 	if atomic.Load(&sched.sysmonwait) != 0 {
   2843 		atomic.Store(&sched.sysmonwait, 0)
   2844 		notewakeup(&sched.sysmonnote)
   2845 	}
   2846 	unlock(&sched.lock)
   2847 }
   2848 
   2849 func entersyscall_gcwait() {
   2850 	_g_ := getg()
   2851 	_p_ := _g_.m.p.ptr()
   2852 
   2853 	lock(&sched.lock)
   2854 	if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
   2855 		if trace.enabled {
   2856 			traceGoSysBlock(_p_)
   2857 			traceProcStop(_p_)
   2858 		}
   2859 		_p_.syscalltick++
   2860 		if sched.stopwait--; sched.stopwait == 0 {
   2861 			notewakeup(&sched.stopnote)
   2862 		}
   2863 	}
   2864 	unlock(&sched.lock)
   2865 }
   2866 
   2867 // The same as entersyscall(), but with a hint that the syscall is blocking.
   2868 //go:nosplit
   2869 func entersyscallblock(dummy int32) {
   2870 	_g_ := getg()
   2871 
   2872 	_g_.m.locks++ // see comment in entersyscall
   2873 	_g_.throwsplit = true
   2874 	_g_.stackguard0 = stackPreempt // see comment in entersyscall
   2875 	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
   2876 	_g_.sysblocktraced = true
   2877 	_g_.m.p.ptr().syscalltick++
   2878 
   2879 	// Leave SP around for GC and traceback.
   2880 	pc := getcallerpc()
   2881 	sp := getcallersp(unsafe.Pointer(&dummy))
   2882 	save(pc, sp)
   2883 	_g_.syscallsp = _g_.sched.sp
   2884 	_g_.syscallpc = _g_.sched.pc
   2885 	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
   2886 		sp1 := sp
   2887 		sp2 := _g_.sched.sp
   2888 		sp3 := _g_.syscallsp
   2889 		systemstack(func() {
   2890 			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
   2891 			throw("entersyscallblock")
   2892 		})
   2893 	}
   2894 	casgstatus(_g_, _Grunning, _Gsyscall)
   2895 	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
   2896 		systemstack(func() {
   2897 			print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
   2898 			throw("entersyscallblock")
   2899 		})
   2900 	}
   2901 
   2902 	systemstack(entersyscallblock_handoff)
   2903 
   2904 	// Resave for traceback during blocked call.
   2905 	save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
   2906 
   2907 	_g_.m.locks--
   2908 }
   2909 
   2910 func entersyscallblock_handoff() {
   2911 	if trace.enabled {
   2912 		traceGoSysCall()
   2913 		traceGoSysBlock(getg().m.p.ptr())
   2914 	}
   2915 	handoffp(releasep())
   2916 }
   2917 
   2918 // The goroutine g exited its system call.
   2919 // Arrange for it to run on a cpu again.
   2920 // This is called only from the go syscall library, not
   2921 // from the low-level system calls used by the runtime.
   2922 //
   2923 // Write barriers are not allowed because our P may have been stolen.
   2924 //
   2925 //go:nosplit
   2926 //go:nowritebarrierrec
   2927 func exitsyscall(dummy int32) {
   2928 	_g_ := getg()
   2929 
   2930 	_g_.m.locks++ // see comment in entersyscall
   2931 	if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
   2932 		// throw calls print which may try to grow the stack,
   2933 		// but throwsplit == true so the stack can not be grown;
   2934 		// use systemstack to avoid that possible problem.
   2935 		systemstack(func() {
   2936 			throw("exitsyscall: syscall frame is no longer valid")
   2937 		})
   2938 	}
   2939 
   2940 	_g_.waitsince = 0
   2941 	oldp := _g_.m.p.ptr()
   2942 	if exitsyscallfast() {
   2943 		if _g_.m.mcache == nil {
   2944 			systemstack(func() {
   2945 				throw("lost mcache")
   2946 			})
   2947 		}
   2948 		if trace.enabled {
   2949 			if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
   2950 				systemstack(traceGoStart)
   2951 			}
   2952 		}
   2953 		// There's a cpu for us, so we can run.
   2954 		_g_.m.p.ptr().syscalltick++
   2955 		// We need to cas the status and scan before resuming...
   2956 		casgstatus(_g_, _Gsyscall, _Grunning)
   2957 
   2958 		// Garbage collector isn't running (since we are),
   2959 		// so okay to clear syscallsp.
   2960 		_g_.syscallsp = 0
   2961 		_g_.m.locks--
   2962 		if _g_.preempt {
   2963 			// restore the preemption request in case we've cleared it in newstack
   2964 			_g_.stackguard0 = stackPreempt
   2965 		} else {
   2966 			// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
   2967 			_g_.stackguard0 = _g_.stack.lo + _StackGuard
   2968 		}
   2969 		_g_.throwsplit = false
   2970 		return
   2971 	}
   2972 
   2973 	_g_.sysexitticks = 0
   2974 	if trace.enabled {
   2975 		// Wait till traceGoSysBlock event is emitted.
   2976 		// This ensures consistency of the trace (the goroutine is started after it is blocked).
   2977 		for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
   2978 			osyield()
   2979 		}
   2980 		// We can't trace syscall exit right now because we don't have a P.
   2981 		// Tracing code can invoke write barriers that cannot run without a P.
   2982 		// So instead we remember the syscall exit time and emit the event
   2983 		// in execute when we have a P.
   2984 		_g_.sysexitticks = cputicks()
   2985 	}
   2986 
   2987 	_g_.m.locks--
   2988 
   2989 	// Call the scheduler.
   2990 	mcall(exitsyscall0)
   2991 
   2992 	if _g_.m.mcache == nil {
   2993 		systemstack(func() {
   2994 			throw("lost mcache")
   2995 		})
   2996 	}
   2997 
   2998 	// Scheduler returned, so we're allowed to run now.
   2999 	// Delete the syscallsp information that we left for
   3000 	// the garbage collector during the system call.
   3001 	// Must wait until now because until gosched returns
   3002 	// we don't know for sure that the garbage collector
   3003 	// is not running.
   3004 	_g_.syscallsp = 0
   3005 	_g_.m.p.ptr().syscalltick++
   3006 	_g_.throwsplit = false
   3007 }
   3008 
   3009 //go:nosplit
   3010 func exitsyscallfast() bool {
   3011 	_g_ := getg()
   3012 
   3013 	// Freezetheworld sets stopwait but does not retake P's.
   3014 	if sched.stopwait == freezeStopWait {
   3015 		_g_.m.mcache = nil
   3016 		_g_.m.p = 0
   3017 		return false
   3018 	}
   3019 
   3020 	// Try to re-acquire the last P.
   3021 	if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
   3022 		// There's a cpu for us, so we can run.
   3023 		exitsyscallfast_reacquired()
   3024 		return true
   3025 	}
   3026 
   3027 	// Try to get any other idle P.
   3028 	oldp := _g_.m.p.ptr()
   3029 	_g_.m.mcache = nil
   3030 	_g_.m.p = 0
   3031 	if sched.pidle != 0 {
   3032 		var ok bool
   3033 		systemstack(func() {
   3034 			ok = exitsyscallfast_pidle()
   3035 			if ok && trace.enabled {
   3036 				if oldp != nil {
   3037 					// Wait till traceGoSysBlock event is emitted.
   3038 					// This ensures consistency of the trace (the goroutine is started after it is blocked).
   3039 					for oldp.syscalltick == _g_.m.syscalltick {
   3040 						osyield()
   3041 					}
   3042 				}
   3043 				traceGoSysExit(0)
   3044 			}
   3045 		})
   3046 		if ok {
   3047 			return true
   3048 		}
   3049 	}
   3050 	return false
   3051 }
   3052 
   3053 // exitsyscallfast_reacquired is the exitsyscall path on which this G
   3054 // has successfully reacquired the P it was running on before the
   3055 // syscall.
   3056 //
   3057 // This function is allowed to have write barriers because exitsyscall
   3058 // has acquired a P at this point.
   3059 //
   3060 //go:yeswritebarrierrec
   3061 //go:nosplit
   3062 func exitsyscallfast_reacquired() {
   3063 	_g_ := getg()
   3064 	_g_.m.mcache = _g_.m.p.ptr().mcache
   3065 	_g_.m.p.ptr().m.set(_g_.m)
   3066 	if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
   3067 		if trace.enabled {
   3068 			// The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
   3069 			// traceGoSysBlock for this syscall was already emitted,
   3070 			// but here we effectively retake the p from the new syscall running on the same p.
   3071 			systemstack(func() {
   3072 				// Denote blocking of the new syscall.
   3073 				traceGoSysBlock(_g_.m.p.ptr())
   3074 				// Denote completion of the current syscall.
   3075 				traceGoSysExit(0)
   3076 			})
   3077 		}
   3078 		_g_.m.p.ptr().syscalltick++
   3079 	}
   3080 }
   3081 
   3082 func exitsyscallfast_pidle() bool {
   3083 	lock(&sched.lock)
   3084 	_p_ := pidleget()
   3085 	if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
   3086 		atomic.Store(&sched.sysmonwait, 0)
   3087 		notewakeup(&sched.sysmonnote)
   3088 	}
   3089 	unlock(&sched.lock)
   3090 	if _p_ != nil {
   3091 		acquirep(_p_)
   3092 		return true
   3093 	}
   3094 	return false
   3095 }
   3096 
   3097 // exitsyscall slow path on g0.
   3098 // Failed to acquire P, enqueue gp as runnable.
   3099 //
   3100 //go:nowritebarrierrec
   3101 func exitsyscall0(gp *g) {
   3102 	_g_ := getg()
   3103 
   3104 	casgstatus(gp, _Gsyscall, _Grunnable)
   3105 	dropg()
   3106 	lock(&sched.lock)
   3107 	_p_ := pidleget()
   3108 	if _p_ == nil {
   3109 		globrunqput(gp)
   3110 	} else if atomic.Load(&sched.sysmonwait) != 0 {
   3111 		atomic.Store(&sched.sysmonwait, 0)
   3112 		notewakeup(&sched.sysmonnote)
   3113 	}
   3114 	unlock(&sched.lock)
   3115 	if _p_ != nil {
   3116 		acquirep(_p_)
   3117 		execute(gp, false) // Never returns.
   3118 	}
   3119 	if _g_.m.lockedg != 0 {
   3120 		// Wait until another thread schedules gp and so m again.
   3121 		stoplockedm()
   3122 		execute(gp, false) // Never returns.
   3123 	}
   3124 	stopm()
   3125 	schedule() // Never returns.
   3126 }
   3127 
   3128 func beforefork() {
   3129 	gp := getg().m.curg
   3130 
   3131 	// Block signals during a fork, so that the child does not run
   3132 	// a signal handler before exec if a signal is sent to the process
   3133 	// group. See issue #18600.
   3134 	gp.m.locks++
   3135 	msigsave(gp.m)
   3136 	sigblock()
   3137 
   3138 	// This function is called before fork in syscall package.
    3139 	// Code between fork and exec must not allocate memory nor even try to grow the stack.
    3140 	// Here we spoil gp.stackguard0 to reliably detect any attempt to grow the stack.
    3141 	// runtime_AfterFork will undo this in the parent process, but not in the child.
   3142 	gp.stackguard0 = stackFork
   3143 }
   3144 
   3145 // Called from syscall package before fork.
   3146 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
   3147 //go:nosplit
   3148 func syscall_runtime_BeforeFork() {
   3149 	systemstack(beforefork)
   3150 }
   3151 
   3152 func afterfork() {
   3153 	gp := getg().m.curg
   3154 
   3155 	// See the comments in beforefork.
   3156 	gp.stackguard0 = gp.stack.lo + _StackGuard
   3157 
   3158 	msigrestore(gp.m.sigmask)
   3159 
   3160 	gp.m.locks--
   3161 }
   3162 
   3163 // Called from syscall package after fork in parent.
   3164 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
   3165 //go:nosplit
   3166 func syscall_runtime_AfterFork() {
   3167 	systemstack(afterfork)
   3168 }
   3169 
   3170 // inForkedChild is true while manipulating signals in the child process.
   3171 // This is used to avoid calling libc functions in case we are using vfork.
   3172 var inForkedChild bool
   3173 
   3174 // Called from syscall package after fork in child.
   3175 // It resets non-sigignored signals to the default handler, and
   3176 // restores the signal mask in preparation for the exec.
   3177 //
   3178 // Because this might be called during a vfork, and therefore may be
   3179 // temporarily sharing address space with the parent process, this must
    3180 // not change any global variables or call into C code that may do so.
   3181 //
   3182 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
   3183 //go:nosplit
   3184 //go:nowritebarrierrec
   3185 func syscall_runtime_AfterForkInChild() {
   3186 	// It's OK to change the global variable inForkedChild here
   3187 	// because we are going to change it back. There is no race here,
   3188 	// because if we are sharing address space with the parent process,
   3189 	// then the parent process can not be running concurrently.
   3190 	inForkedChild = true
   3191 
   3192 	clearSignalHandlers()
   3193 
   3194 	// When we are the child we are the only thread running,
   3195 	// so we know that nothing else has changed gp.m.sigmask.
   3196 	msigrestore(getg().m.sigmask)
   3197 
   3198 	inForkedChild = false
   3199 }
   3200 
   3201 // Called from syscall package before Exec.
   3202 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
   3203 func syscall_runtime_BeforeExec() {
   3204 	// Prevent thread creation during exec.
   3205 	execLock.lock()
   3206 }
   3207 
   3208 // Called from syscall package after Exec.
   3209 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
   3210 func syscall_runtime_AfterExec() {
   3211 	execLock.unlock()
   3212 }
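
// For illustration (conceptual sketch, not the actual syscall implementation):
// the hooks above bracket the fork/exec path in the syscall package roughly
// as follows on Unix; helper names other than the linknamed hooks are
// placeholders.
//
//	runtime_BeforeFork()           // block signals, spoil the stack guard
//	pid := fork()
//	if pid == 0 {                  // child
//		runtime_AfterForkInChild() // reset handlers, restore the signal mask
//		exec(argv0, argv, envv)
//	}
//	runtime_AfterFork()            // parent: restore mask and stack guard
//
// A plain exec (without a fork) is instead bracketed by runtime_BeforeExec
// and runtime_AfterExec so that no new threads are created while the exec is
// in flight.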
   3213 
   3214 // Allocate a new g, with a stack big enough for stacksize bytes.
   3215 func malg(stacksize int32) *g {
   3216 	newg := new(g)
   3217 	if stacksize >= 0 {
   3218 		stacksize = round2(_StackSystem + stacksize)
   3219 		systemstack(func() {
   3220 			newg.stack = stackalloc(uint32(stacksize))
   3221 		})
   3222 		newg.stackguard0 = newg.stack.lo + _StackGuard
   3223 		newg.stackguard1 = ^uintptr(0)
   3224 	}
   3225 	return newg
   3226 }
   3227 
   3228 // Create a new g running fn with siz bytes of arguments.
   3229 // Put it on the queue of g's waiting to run.
   3230 // The compiler turns a go statement into a call to this.
   3231 // Cannot split the stack because it assumes that the arguments
   3232 // are available sequentially after &fn; they would not be
   3233 // copied if a stack split occurred.
   3234 //go:nosplit
   3235 func newproc(siz int32, fn *funcval) {
   3236 	argp := add(unsafe.Pointer(&fn), sys.PtrSize)
   3237 	pc := getcallerpc()
   3238 	systemstack(func() {
   3239 		newproc1(fn, (*uint8)(argp), siz, pc)
   3240 	})
   3241 }
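
// For illustration (conceptual sketch, not the exact compiler output): a
// statement such as
//
//	go add(1, 2)
//
// is lowered by the compiler into roughly
//
//	newproc(siz, fn)
//
// where fn is the funcval for add, siz is the byte size of add's argument
// frame (16 bytes for two ints on a 64-bit platform), and the argument
// values are laid out in memory immediately after &fn, which is why newproc
// must not split the stack.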
   3242 
   3243 // Create a new g running fn with narg bytes of arguments starting
   3244 // at argp. callerpc is the address of the go statement that created
   3245 // this. The new g is put on the queue of g's waiting to run.
   3246 func newproc1(fn *funcval, argp *uint8, narg int32, callerpc uintptr) {
   3247 	_g_ := getg()
   3248 
   3249 	if fn == nil {
   3250 		_g_.m.throwing = -1 // do not dump full stacks
   3251 		throw("go of nil func value")
   3252 	}
   3253 	_g_.m.locks++ // disable preemption because it can be holding p in a local var
   3254 	siz := narg
   3255 	siz = (siz + 7) &^ 7
   3256 
   3257 	// We could allocate a larger initial stack if necessary.
   3258 	// Not worth it: this is almost always an error.
   3259 	// 4*sizeof(uintreg): extra space added below
   3260 	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
   3261 	if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
   3262 		throw("newproc: function arguments too large for new goroutine")
   3263 	}
   3264 
   3265 	_p_ := _g_.m.p.ptr()
   3266 	newg := gfget(_p_)
   3267 	if newg == nil {
   3268 		newg = malg(_StackMin)
   3269 		casgstatus(newg, _Gidle, _Gdead)
   3270 		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
   3271 	}
   3272 	if newg.stack.hi == 0 {
   3273 		throw("newproc1: newg missing stack")
   3274 	}
   3275 
   3276 	if readgstatus(newg) != _Gdead {
   3277 		throw("newproc1: new g is not Gdead")
   3278 	}
   3279 
   3280 	totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
   3281 	totalSize += -totalSize & (sys.SpAlign - 1)                  // align to spAlign
   3282 	sp := newg.stack.hi - totalSize
   3283 	spArg := sp
   3284 	if usesLR {
   3285 		// caller's LR
   3286 		*(*uintptr)(unsafe.Pointer(sp)) = 0
   3287 		prepGoExitFrame(sp)
   3288 		spArg += sys.MinFrameSize
   3289 	}
   3290 	if narg > 0 {
   3291 		memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
   3292 		// This is a stack-to-stack copy. If write barriers
   3293 		// are enabled and the source stack is grey (the
   3294 		// destination is always black), then perform a
   3295 		// barrier copy. We do this *after* the memmove
   3296 		// because the destination stack may have garbage on
   3297 		// it.
   3298 		if writeBarrier.needed && !_g_.m.curg.gcscandone {
   3299 			f := findfunc(fn.fn)
   3300 			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
   3301 			// We're in the prologue, so it's always stack map index 0.
   3302 			bv := stackmapdata(stkmap, 0)
   3303 			bulkBarrierBitmap(spArg, spArg, uintptr(narg), 0, bv.bytedata)
   3304 		}
   3305 	}
   3306 
   3307 	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
   3308 	newg.sched.sp = sp
   3309 	newg.stktopsp = sp
   3310 	newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
   3311 	newg.sched.g = guintptr(unsafe.Pointer(newg))
   3312 	gostartcallfn(&newg.sched, fn)
   3313 	newg.gopc = callerpc
   3314 	newg.startpc = fn.fn
   3315 	if _g_.m.curg != nil {
   3316 		newg.labels = _g_.m.curg.labels
   3317 	}
   3318 	if isSystemGoroutine(newg) {
   3319 		atomic.Xadd(&sched.ngsys, +1)
   3320 	}
   3321 	newg.gcscanvalid = false
   3322 	casgstatus(newg, _Gdead, _Grunnable)
   3323 
   3324 	if _p_.goidcache == _p_.goidcacheend {
   3325 		// Sched.goidgen is the last allocated id,
   3326 		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
   3327 		// At startup sched.goidgen=0, so main goroutine receives goid=1.
   3328 		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
   3329 		_p_.goidcache -= _GoidCacheBatch - 1
   3330 		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
   3331 	}
   3332 	newg.goid = int64(_p_.goidcache)
   3333 	_p_.goidcache++
   3334 	if raceenabled {
   3335 		newg.racectx = racegostart(callerpc)
   3336 	}
   3337 	if trace.enabled {
   3338 		traceGoCreate(newg, newg.startpc)
   3339 	}
   3340 	runqput(_p_, newg, true)
   3341 
   3342 	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
   3343 		wakep()
   3344 	}
   3345 	_g_.m.locks--
   3346 	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   3347 		_g_.stackguard0 = stackPreempt
   3348 	}
   3349 }
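
// Worked example for the argument-size check above (assuming amd64, where
// sys.RegSize = 8 and _StackMin = 2048): the limit evaluates to
//
//	2048 - 4*8 - 8 = 2008 bytes
//
// so a go statement whose (8-byte-aligned) argument frame is 2008 bytes or
// larger throws "newproc: function arguments too large for new goroutine".
// Large values are better passed by pointer.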
   3350 
   3351 // Put on gfree list.
   3352 // If local list is too long, transfer a batch to the global list.
   3353 func gfput(_p_ *p, gp *g) {
   3354 	if readgstatus(gp) != _Gdead {
   3355 		throw("gfput: bad status (not Gdead)")
   3356 	}
   3357 
   3358 	stksize := gp.stack.hi - gp.stack.lo
   3359 
   3360 	if stksize != _FixedStack {
   3361 		// non-standard stack size - free it.
   3362 		stackfree(gp.stack)
   3363 		gp.stack.lo = 0
   3364 		gp.stack.hi = 0
   3365 		gp.stackguard0 = 0
   3366 	}
   3367 
   3368 	gp.schedlink.set(_p_.gfree)
   3369 	_p_.gfree = gp
   3370 	_p_.gfreecnt++
   3371 	if _p_.gfreecnt >= 64 {
   3372 		lock(&sched.gflock)
   3373 		for _p_.gfreecnt >= 32 {
   3374 			_p_.gfreecnt--
   3375 			gp = _p_.gfree
   3376 			_p_.gfree = gp.schedlink.ptr()
   3377 			if gp.stack.lo == 0 {
   3378 				gp.schedlink.set(sched.gfreeNoStack)
   3379 				sched.gfreeNoStack = gp
   3380 			} else {
   3381 				gp.schedlink.set(sched.gfreeStack)
   3382 				sched.gfreeStack = gp
   3383 			}
   3384 			sched.ngfree++
   3385 		}
   3386 		unlock(&sched.gflock)
   3387 	}
   3388 }
   3389 
   3390 // Get from gfree list.
   3391 // If local list is empty, grab a batch from global list.
   3392 func gfget(_p_ *p) *g {
   3393 retry:
   3394 	gp := _p_.gfree
   3395 	if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) {
   3396 		lock(&sched.gflock)
   3397 		for _p_.gfreecnt < 32 {
   3398 			if sched.gfreeStack != nil {
   3399 				// Prefer Gs with stacks.
   3400 				gp = sched.gfreeStack
   3401 				sched.gfreeStack = gp.schedlink.ptr()
   3402 			} else if sched.gfreeNoStack != nil {
   3403 				gp = sched.gfreeNoStack
   3404 				sched.gfreeNoStack = gp.schedlink.ptr()
   3405 			} else {
   3406 				break
   3407 			}
   3408 			_p_.gfreecnt++
   3409 			sched.ngfree--
   3410 			gp.schedlink.set(_p_.gfree)
   3411 			_p_.gfree = gp
   3412 		}
   3413 		unlock(&sched.gflock)
   3414 		goto retry
   3415 	}
   3416 	if gp != nil {
   3417 		_p_.gfree = gp.schedlink.ptr()
   3418 		_p_.gfreecnt--
   3419 		if gp.stack.lo == 0 {
   3420 			// Stack was deallocated in gfput. Allocate a new one.
   3421 			systemstack(func() {
   3422 				gp.stack = stackalloc(_FixedStack)
   3423 			})
   3424 			gp.stackguard0 = gp.stack.lo + _StackGuard
   3425 		} else {
   3426 			if raceenabled {
   3427 				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
   3428 			}
   3429 			if msanenabled {
   3430 				msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
   3431 			}
   3432 		}
   3433 	}
   3434 	return gp
   3435 }
   3436 
   3437 // Purge all cached G's from gfree list to the global list.
   3438 func gfpurge(_p_ *p) {
   3439 	lock(&sched.gflock)
   3440 	for _p_.gfreecnt != 0 {
   3441 		_p_.gfreecnt--
   3442 		gp := _p_.gfree
   3443 		_p_.gfree = gp.schedlink.ptr()
   3444 		if gp.stack.lo == 0 {
   3445 			gp.schedlink.set(sched.gfreeNoStack)
   3446 			sched.gfreeNoStack = gp
   3447 		} else {
   3448 			gp.schedlink.set(sched.gfreeStack)
   3449 			sched.gfreeStack = gp
   3450 		}
   3451 		sched.ngfree++
   3452 	}
   3453 	unlock(&sched.gflock)
   3454 }
   3455 
   3456 // Breakpoint executes a breakpoint trap.
   3457 func Breakpoint() {
   3458 	breakpoint()
   3459 }
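
// Usage sketch (illustrative; compute and use are hypothetical helpers):
// Breakpoint is mainly useful under a debugger such as gdb or delve; without
// one attached, the trap typically kills the program with SIGTRAP.
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		x := compute()
//		runtime.Breakpoint() // debugger stops here; x can be inspected
//		use(x)
//	}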
   3460 
   3461 // dolockOSThread is called by LockOSThread and lockOSThread below
   3462 // after they modify m.locked. Do not allow preemption during this call,
   3463 // or else the m might be different in this function than in the caller.
   3464 //go:nosplit
   3465 func dolockOSThread() {
   3466 	_g_ := getg()
   3467 	_g_.m.lockedg.set(_g_)
   3468 	_g_.lockedm.set(_g_.m)
   3469 }
   3470 
   3471 //go:nosplit
   3472 
   3473 // LockOSThread wires the calling goroutine to its current operating system thread.
   3474 // The calling goroutine will always execute in that thread,
   3475 // and no other goroutine will execute in it,
   3476 // until the calling goroutine has made as many calls to
   3477 // UnlockOSThread as to LockOSThread.
   3478 // If the calling goroutine exits without unlocking the thread,
   3479 // the thread will be terminated.
   3480 //
   3481 // A goroutine should call LockOSThread before calling OS services or
   3482 // non-Go library functions that depend on per-thread state.
   3483 func LockOSThread() {
   3484 	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
   3485 		// If we need to start a new thread from the locked
   3486 		// thread, we need the template thread. Start it now
   3487 		// while we're in a known-good state.
   3488 		startTemplateThread()
   3489 	}
   3490 	_g_ := getg()
   3491 	_g_.m.lockedExt++
   3492 	if _g_.m.lockedExt == 0 {
   3493 		_g_.m.lockedExt--
   3494 		panic("LockOSThread nesting overflow")
   3495 	}
   3496 	dolockOSThread()
   3497 }
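
// Usage sketch (illustrative; initThreadAffineLibrary, events and handle are
// hypothetical): a typical reason to lock is a C library or OS facility that
// keeps per-thread state, such as a GUI event loop or thread-local
// credentials.
//
//	go func() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		// Everything below runs on the same OS thread.
//		initThreadAffineLibrary()
//		for ev := range events {
//			handle(ev)
//		}
//	}()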
   3498 
   3499 //go:nosplit
   3500 func lockOSThread() {
   3501 	getg().m.lockedInt++
   3502 	dolockOSThread()
   3503 }
   3504 
   3505 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
   3506 // after they update m->locked. Do not allow preemption during this call,
    3507 // or else the m might be different in this function than in the caller.
   3508 //go:nosplit
   3509 func dounlockOSThread() {
   3510 	_g_ := getg()
   3511 	if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
   3512 		return
   3513 	}
   3514 	_g_.m.lockedg = 0
   3515 	_g_.lockedm = 0
   3516 }
   3517 
   3518 //go:nosplit
   3519 
   3520 // UnlockOSThread undoes an earlier call to LockOSThread.
   3521 // If this drops the number of active LockOSThread calls on the
   3522 // calling goroutine to zero, it unwires the calling goroutine from
   3523 // its fixed operating system thread.
   3524 // If there are no active LockOSThread calls, this is a no-op.
   3525 //
   3526 // Before calling UnlockOSThread, the caller must ensure that the OS
   3527 // thread is suitable for running other goroutines. If the caller made
   3528 // any permanent changes to the state of the thread that would affect
   3529 // other goroutines, it should not call this function and thus leave
   3530 // the goroutine locked to the OS thread until the goroutine (and
   3531 // hence the thread) exits.
   3532 func UnlockOSThread() {
   3533 	_g_ := getg()
   3534 	if _g_.m.lockedExt == 0 {
   3535 		return
   3536 	}
   3537 	_g_.m.lockedExt--
   3538 	dounlockOSThread()
   3539 }
   3540 
   3541 //go:nosplit
   3542 func unlockOSThread() {
   3543 	_g_ := getg()
   3544 	if _g_.m.lockedInt == 0 {
   3545 		systemstack(badunlockosthread)
   3546 	}
   3547 	_g_.m.lockedInt--
   3548 	dounlockOSThread()
   3549 }
   3550 
   3551 func badunlockosthread() {
   3552 	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
   3553 }
   3554 
   3555 func gcount() int32 {
   3556 	n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
   3557 	for _, _p_ := range allp {
   3558 		n -= _p_.gfreecnt
   3559 	}
   3560 
   3561 	// All these variables can be changed concurrently, so the result can be inconsistent.
   3562 	// But at least the current goroutine is running.
   3563 	if n < 1 {
   3564 		n = 1
   3565 	}
   3566 	return n
   3567 }
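
// gcount backs the exported runtime.NumGoroutine, so for example
//
//	fmt.Println("goroutines:", runtime.NumGoroutine())
//
// prints an instantaneous count that excludes system goroutines and G's
// sitting on the free lists, with the caveat about concurrent changes noted
// above.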
   3568 
   3569 func mcount() int32 {
   3570 	return int32(sched.mnext - sched.nmfreed)
   3571 }
   3572 
   3573 var prof struct {
   3574 	signalLock uint32
   3575 	hz         int32
   3576 }
   3577 
   3578 func _System()                    { _System() }
   3579 func _ExternalCode()              { _ExternalCode() }
   3580 func _LostExternalCode()          { _LostExternalCode() }
   3581 func _GC()                        { _GC() }
   3582 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
   3583 
   3584 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
   3585 var lostAtomic64Count uint64
   3586 
   3587 // Called if we receive a SIGPROF signal.
   3588 // Called by the signal handler, may run during STW.
   3589 //go:nowritebarrierrec
   3590 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
   3591 	if prof.hz == 0 {
   3592 		return
   3593 	}
   3594 
   3595 	// On mips{,le}, 64bit atomics are emulated with spinlocks, in
   3596 	// runtime/internal/atomic. If SIGPROF arrives while the program is inside
   3597 	// the critical section, it creates a deadlock (when writing the sample).
    3598 	// As a workaround, count the SIGPROFs received while in the critical
    3599 	// section (lostAtomic64Count) and report them later, from a subsequent
    3600 	// sigprof call, via cpuprof.addLostAtomic64 (attributed to _LostSIGPROFDuringAtomic64 as the pc).
   3601 	if GOARCH == "mips" || GOARCH == "mipsle" {
   3602 		if f := findfunc(pc); f.valid() {
   3603 			if hasprefix(funcname(f), "runtime/internal/atomic") {
   3604 				lostAtomic64Count++
   3605 				return
   3606 			}
   3607 		}
   3608 	}
   3609 
   3610 	// Profiling runs concurrently with GC, so it must not allocate.
   3611 	// Set a trap in case the code does allocate.
   3612 	// Note that on windows, one thread takes profiles of all the
   3613 	// other threads, so mp is usually not getg().m.
   3614 	// In fact mp may not even be stopped.
   3615 	// See golang.org/issue/17165.
   3616 	getg().m.mallocing++
   3617 
   3618 	// Define that a "user g" is a user-created goroutine, and a "system g"
   3619 	// is one that is m->g0 or m->gsignal.
   3620 	//
   3621 	// We might be interrupted for profiling halfway through a
   3622 	// goroutine switch. The switch involves updating three (or four) values:
   3623 	// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
   3624 	// because once it gets updated the new g is running.
   3625 	//
   3626 	// When switching from a user g to a system g, LR is not considered live,
    3627 	// so the update only affects g, SP, and PC. Since PC must be last,
   3628 	// the possible partial transitions in ordinary execution are (1) g alone is updated,
   3629 	// (2) both g and SP are updated, and (3) SP alone is updated.
   3630 	// If SP or g alone is updated, we can detect the partial transition by checking
   3631 	// whether the SP is within g's stack bounds. (We could also require that SP
   3632 	// be changed only after g, but the stack bounds check is needed by other
   3633 	// cases, so there is no need to impose an additional requirement.)
   3634 	//
   3635 	// There is one exceptional transition to a system g, not in ordinary execution.
   3636 	// When a signal arrives, the operating system starts the signal handler running
   3637 	// with an updated PC and SP. The g is updated last, at the beginning of the
   3638 	// handler. There are two reasons this is okay. First, until g is updated the
   3639 	// g and SP do not match, so the stack bounds check detects the partial transition.
   3640 	// Second, signal handlers currently run with signals disabled, so a profiling
   3641 	// signal cannot arrive during the handler.
   3642 	//
   3643 	// When switching from a system g to a user g, there are three possibilities.
   3644 	//
   3645 	// First, it may be that the g switch has no PC update, because the SP
   3646 	// either corresponds to a user g throughout (as in asmcgocall)
   3647 	// or because it has been arranged to look like a user g frame
   3648 	// (as in cgocallback_gofunc). In this case, since the entire
   3649 	// transition is a g+SP update, a partial transition updating just one of
   3650 	// those will be detected by the stack bounds check.
   3651 	//
   3652 	// Second, when returning from a signal handler, the PC and SP updates
   3653 	// are performed by the operating system in an atomic update, so the g
   3654 	// update must be done before them. The stack bounds check detects
   3655 	// the partial transition here, and (again) signal handlers run with signals
   3656 	// disabled, so a profiling signal cannot arrive then anyway.
   3657 	//
   3658 	// Third, the common case: it may be that the switch updates g, SP, and PC
    3659 	// separately. If the PC is within any of the functions that do this,
    3660 	// we don't ask for a traceback. See the function setsSP for more about this.
   3661 	//
   3662 	// There is another apparently viable approach, recorded here in case
   3663 	// the "PC within setsSP function" check turns out not to be usable.
   3664 	// It would be possible to delay the update of either g or SP until immediately
   3665 	// before the PC update instruction. Then, because of the stack bounds check,
   3666 	// the only problematic interrupt point is just before that PC update instruction,
   3667 	// and the sigprof handler can detect that instruction and simulate stepping past
   3668 	// it in order to reach a consistent state. On ARM, the update of g must be made
   3669 	// in two places (in R10 and also in a TLS slot), so the delayed update would
   3670 	// need to be the SP update. The sigprof handler must read the instruction at
   3671 	// the current PC and if it was the known instruction (for example, JMP BX or
   3672 	// MOV R2, PC), use that other register in place of the PC value.
   3673 	// The biggest drawback to this solution is that it requires that we can tell
   3674 	// whether it's safe to read from the memory pointed at by PC.
   3675 	// In a correct program, we can test PC == nil and otherwise read,
   3676 	// but if a profiling signal happens at the instant that a program executes
   3677 	// a bad jump (before the program manages to handle the resulting fault)
   3678 	// the profiling handler could fault trying to read nonexistent memory.
   3679 	//
   3680 	// To recap, there are no constraints on the assembly being used for the
   3681 	// transition. We simply require that g and SP match and that the PC is not
   3682 	// in gogo.
   3683 	traceback := true
   3684 	if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
   3685 		traceback = false
   3686 	}
   3687 	var stk [maxCPUProfStack]uintptr
   3688 	n := 0
   3689 	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
   3690 		cgoOff := 0
   3691 		// Check cgoCallersUse to make sure that we are not
   3692 		// interrupting other code that is fiddling with
   3693 		// cgoCallers.  We are running in a signal handler
   3694 		// with all signals blocked, so we don't have to worry
   3695 		// about any other code interrupting us.
   3696 		if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
   3697 			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
   3698 				cgoOff++
   3699 			}
   3700 			copy(stk[:], mp.cgoCallers[:cgoOff])
   3701 			mp.cgoCallers[0] = 0
   3702 		}
   3703 
   3704 		// Collect Go stack that leads to the cgo call.
   3705 		n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
   3706 	} else if traceback {
   3707 		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
   3708 	}
   3709 
   3710 	if n <= 0 {
   3711 		// Normal traceback is impossible or has failed.
   3712 		// See if it falls into several common cases.
   3713 		n = 0
   3714 		if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
   3715 			// Libcall, i.e. runtime syscall on windows.
   3716 			// Collect Go stack that leads to the call.
   3717 			n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
   3718 		}
   3719 		if n == 0 {
   3720 			// If all of the above has failed, account it against abstract "System" or "GC".
   3721 			n = 2
   3722 			// "ExternalCode" is better than "etext".
   3723 			if pc > firstmoduledata.etext {
   3724 				pc = funcPC(_ExternalCode) + sys.PCQuantum
   3725 			}
   3726 			stk[0] = pc
   3727 			if mp.preemptoff != "" || mp.helpgc != 0 {
   3728 				stk[1] = funcPC(_GC) + sys.PCQuantum
   3729 			} else {
   3730 				stk[1] = funcPC(_System) + sys.PCQuantum
   3731 			}
   3732 		}
   3733 	}
   3734 
   3735 	if prof.hz != 0 {
   3736 		if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 {
   3737 			cpuprof.addLostAtomic64(lostAtomic64Count)
   3738 			lostAtomic64Count = 0
   3739 		}
   3740 		cpuprof.add(gp, stk[:n])
   3741 	}
   3742 	getg().m.mallocing--
   3743 }
   3744 
   3745 // If the signal handler receives a SIGPROF signal on a non-Go thread,
   3746 // it tries to collect a traceback into sigprofCallers.
   3747 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
   3748 var sigprofCallers cgoCallers
   3749 var sigprofCallersUse uint32
   3750 
   3751 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
   3752 // and the signal handler collected a stack trace in sigprofCallers.
   3753 // When this is called, sigprofCallersUse will be non-zero.
   3754 // g is nil, and what we can do is very limited.
   3755 //go:nosplit
   3756 //go:nowritebarrierrec
   3757 func sigprofNonGo() {
   3758 	if prof.hz != 0 {
   3759 		n := 0
   3760 		for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
   3761 			n++
   3762 		}
   3763 		cpuprof.addNonGo(sigprofCallers[:n])
   3764 	}
   3765 
   3766 	atomic.Store(&sigprofCallersUse, 0)
   3767 }
   3768 
   3769 // sigprofNonGoPC is called when a profiling signal arrived on a
   3770 // non-Go thread and we have a single PC value, not a stack trace.
   3771 // g is nil, and what we can do is very limited.
   3772 //go:nosplit
   3773 //go:nowritebarrierrec
   3774 func sigprofNonGoPC(pc uintptr) {
   3775 	if prof.hz != 0 {
   3776 		stk := []uintptr{
   3777 			pc,
   3778 			funcPC(_ExternalCode) + sys.PCQuantum,
   3779 		}
   3780 		cpuprof.addNonGo(stk)
   3781 	}
   3782 }
   3783 
    3784 // setsSP reports whether a function will set the SP
    3785 // to an absolute value. It is important that we don't
    3786 // traceback when such a function is at the bottom of the stack
    3787 // since we can't be sure that we will
    3788 // find the caller.
   3789 //
   3790 // If the function is not on the bottom of the stack
   3791 // we assume that it will have set it up so that traceback will be consistent,
   3792 // either by being a traceback terminating function
   3793 // or putting one on the stack at the right offset.
   3794 func setsSP(pc uintptr) bool {
   3795 	f := findfunc(pc)
   3796 	if !f.valid() {
   3797 		// couldn't find the function for this PC,
   3798 		// so assume the worst and stop traceback
   3799 		return true
   3800 	}
   3801 	switch f.entry {
   3802 	case gogoPC, systemstackPC, mcallPC, morestackPC:
   3803 		return true
   3804 	}
   3805 	return false
   3806 }
   3807 
   3808 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
   3809 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
   3810 func setcpuprofilerate(hz int32) {
   3811 	// Force sane arguments.
   3812 	if hz < 0 {
   3813 		hz = 0
   3814 	}
   3815 
   3816 	// Disable preemption, otherwise we can be rescheduled to another thread
   3817 	// that has profiling enabled.
   3818 	_g_ := getg()
   3819 	_g_.m.locks++
   3820 
    3821 	// Stop the profiler on this thread so that it is safe to lock prof:
   3822 	// if a profiling signal came in while we had prof locked,
   3823 	// it would deadlock.
   3824 	setThreadCPUProfiler(0)
   3825 
   3826 	for !atomic.Cas(&prof.signalLock, 0, 1) {
   3827 		osyield()
   3828 	}
   3829 	if prof.hz != hz {
   3830 		setProcessCPUProfiler(hz)
   3831 		prof.hz = hz
   3832 	}
   3833 	atomic.Store(&prof.signalLock, 0)
   3834 
   3835 	lock(&sched.lock)
   3836 	sched.profilehz = hz
   3837 	unlock(&sched.lock)
   3838 
   3839 	if hz != 0 {
   3840 		setThreadCPUProfiler(hz)
   3841 	}
   3842 
   3843 	_g_.m.locks--
   3844 }
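
// Usage sketch (illustrative): setcpuprofilerate is reached through the
// exported runtime.SetCPUProfileRate, which runtime/pprof calls when a
// profile is started (at roughly 100 Hz by default):
//
//	f, err := os.Create("cpu.prof")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := pprof.StartCPUProfile(f); err != nil {
//		log.Fatal(err)
//	}
//	defer pprof.StopCPUProfile()
//
// While profiling is active, each thread receives SIGPROF at the configured
// rate and the samples are recorded by sigprof above.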
   3845 
   3846 // Change number of processors. The world is stopped, sched is locked.
   3847 // gcworkbufs are not being modified by either the GC or
   3848 // the write barrier code.
    3849 // Returns the list of Ps with local work; they need to be scheduled by the caller.
   3850 func procresize(nprocs int32) *p {
   3851 	old := gomaxprocs
   3852 	if old < 0 || nprocs <= 0 {
   3853 		throw("procresize: invalid arg")
   3854 	}
   3855 	if trace.enabled {
   3856 		traceGomaxprocs(nprocs)
   3857 	}
   3858 
   3859 	// update statistics
   3860 	now := nanotime()
   3861 	if sched.procresizetime != 0 {
   3862 		sched.totaltime += int64(old) * (now - sched.procresizetime)
   3863 	}
   3864 	sched.procresizetime = now
   3865 
   3866 	// Grow allp if necessary.
   3867 	if nprocs > int32(len(allp)) {
   3868 		// Synchronize with retake, which could be running
   3869 		// concurrently since it doesn't run on a P.
   3870 		lock(&allpLock)
   3871 		if nprocs <= int32(cap(allp)) {
   3872 			allp = allp[:nprocs]
   3873 		} else {
   3874 			nallp := make([]*p, nprocs)
   3875 			// Copy everything up to allp's cap so we
   3876 			// never lose old allocated Ps.
   3877 			copy(nallp, allp[:cap(allp)])
   3878 			allp = nallp
   3879 		}
   3880 		unlock(&allpLock)
   3881 	}
   3882 
   3883 	// initialize new P's
   3884 	for i := int32(0); i < nprocs; i++ {
   3885 		pp := allp[i]
   3886 		if pp == nil {
   3887 			pp = new(p)
   3888 			pp.id = i
   3889 			pp.status = _Pgcstop
   3890 			pp.sudogcache = pp.sudogbuf[:0]
   3891 			for i := range pp.deferpool {
   3892 				pp.deferpool[i] = pp.deferpoolbuf[i][:0]
   3893 			}
   3894 			pp.wbBuf.reset()
   3895 			atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
   3896 		}
   3897 		if pp.mcache == nil {
   3898 			if old == 0 && i == 0 {
   3899 				if getg().m.mcache == nil {
   3900 					throw("missing mcache?")
   3901 				}
   3902 				pp.mcache = getg().m.mcache // bootstrap
   3903 			} else {
   3904 				pp.mcache = allocmcache()
   3905 			}
   3906 		}
   3907 		if raceenabled && pp.racectx == 0 {
   3908 			if old == 0 && i == 0 {
   3909 				pp.racectx = raceprocctx0
   3910 				raceprocctx0 = 0 // bootstrap
   3911 			} else {
   3912 				pp.racectx = raceproccreate()
   3913 			}
   3914 		}
   3915 	}
   3916 
   3917 	// free unused P's
   3918 	for i := nprocs; i < old; i++ {
   3919 		p := allp[i]
   3920 		if trace.enabled && p == getg().m.p.ptr() {
   3921 			// moving to p[0], pretend that we were descheduled
   3922 			// and then scheduled again to keep the trace sane.
   3923 			traceGoSched()
   3924 			traceProcStop(p)
   3925 		}
   3926 		// move all runnable goroutines to the global queue
   3927 		for p.runqhead != p.runqtail {
   3928 			// pop from tail of local queue
   3929 			p.runqtail--
   3930 			gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
   3931 			// push onto head of global queue
   3932 			globrunqputhead(gp)
   3933 		}
   3934 		if p.runnext != 0 {
   3935 			globrunqputhead(p.runnext.ptr())
   3936 			p.runnext = 0
   3937 		}
   3938 		// if there's a background worker, make it runnable and put
   3939 		// it on the global queue so it can clean itself up
   3940 		if gp := p.gcBgMarkWorker.ptr(); gp != nil {
   3941 			casgstatus(gp, _Gwaiting, _Grunnable)
   3942 			if trace.enabled {
   3943 				traceGoUnpark(gp, 0)
   3944 			}
   3945 			globrunqput(gp)
   3946 			// This assignment doesn't race because the
   3947 			// world is stopped.
   3948 			p.gcBgMarkWorker.set(nil)
   3949 		}
   3950 		// Flush p's write barrier buffer.
   3951 		if gcphase != _GCoff {
   3952 			wbBufFlush1(p)
   3953 			p.gcw.dispose()
   3954 		}
   3955 		for i := range p.sudogbuf {
   3956 			p.sudogbuf[i] = nil
   3957 		}
   3958 		p.sudogcache = p.sudogbuf[:0]
   3959 		for i := range p.deferpool {
   3960 			for j := range p.deferpoolbuf[i] {
   3961 				p.deferpoolbuf[i][j] = nil
   3962 			}
   3963 			p.deferpool[i] = p.deferpoolbuf[i][:0]
   3964 		}
   3965 		freemcache(p.mcache)
   3966 		p.mcache = nil
   3967 		gfpurge(p)
   3968 		traceProcFree(p)
   3969 		if raceenabled {
   3970 			raceprocdestroy(p.racectx)
   3971 			p.racectx = 0
   3972 		}
   3973 		p.gcAssistTime = 0
   3974 		p.status = _Pdead
   3975 		// can't free P itself because it can be referenced by an M in syscall
   3976 	}
   3977 
   3978 	// Trim allp.
   3979 	if int32(len(allp)) != nprocs {
   3980 		lock(&allpLock)
   3981 		allp = allp[:nprocs]
   3982 		unlock(&allpLock)
   3983 	}
   3984 
   3985 	_g_ := getg()
   3986 	if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
   3987 		// continue to use the current P
   3988 		_g_.m.p.ptr().status = _Prunning
   3989 	} else {
   3990 		// release the current P and acquire allp[0]
   3991 		if _g_.m.p != 0 {
   3992 			_g_.m.p.ptr().m = 0
   3993 		}
   3994 		_g_.m.p = 0
   3995 		_g_.m.mcache = nil
   3996 		p := allp[0]
   3997 		p.m = 0
   3998 		p.status = _Pidle
   3999 		acquirep(p)
   4000 		if trace.enabled {
   4001 			traceGoStart()
   4002 		}
   4003 	}
   4004 	var runnablePs *p
   4005 	for i := nprocs - 1; i >= 0; i-- {
   4006 		p := allp[i]
   4007 		if _g_.m.p.ptr() == p {
   4008 			continue
   4009 		}
   4010 		p.status = _Pidle
   4011 		if runqempty(p) {
   4012 			pidleput(p)
   4013 		} else {
   4014 			p.m.set(mget())
   4015 			p.link.set(runnablePs)
   4016 			runnablePs = p
   4017 		}
   4018 	}
   4019 	stealOrder.reset(uint32(nprocs))
   4020 	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
   4021 	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
   4022 	return runnablePs
   4023 }
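
// Usage sketch (illustrative): procresize is ultimately what runs when user
// code changes the number of Ps, e.g.
//
//	prev := runtime.GOMAXPROCS(4) // stops the world, resizes allp to 4 Ps, restarts
//	defer runtime.GOMAXPROCS(prev)
//
// Calling runtime.GOMAXPROCS(0) only queries the current value and does not
// trigger a resize.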
   4024 
   4025 // Associate p and the current m.
   4026 //
   4027 // This function is allowed to have write barriers even if the caller
   4028 // isn't because it immediately acquires _p_.
   4029 //
   4030 //go:yeswritebarrierrec
   4031 func acquirep(_p_ *p) {
   4032 	// Do the part that isn't allowed to have write barriers.
   4033 	acquirep1(_p_)
   4034 
   4035 	// have p; write barriers now allowed
   4036 	_g_ := getg()
   4037 	_g_.m.mcache = _p_.mcache
   4038 
   4039 	if trace.enabled {
   4040 		traceProcStart()
   4041 	}
   4042 }
   4043 
   4044 // acquirep1 is the first step of acquirep, which actually acquires
   4045 // _p_. This is broken out so we can disallow write barriers for this
   4046 // part, since we don't yet have a P.
   4047 //
   4048 //go:nowritebarrierrec
   4049 func acquirep1(_p_ *p) {
   4050 	_g_ := getg()
   4051 
   4052 	if _g_.m.p != 0 || _g_.m.mcache != nil {
   4053 		throw("acquirep: already in go")
   4054 	}
   4055 	if _p_.m != 0 || _p_.status != _Pidle {
   4056 		id := int64(0)
   4057 		if _p_.m != 0 {
   4058 			id = _p_.m.ptr().id
   4059 		}
   4060 		print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
   4061 		throw("acquirep: invalid p state")
   4062 	}
   4063 	_g_.m.p.set(_p_)
   4064 	_p_.m.set(_g_.m)
   4065 	_p_.status = _Prunning
   4066 }
   4067 
   4068 // Disassociate p and the current m.
   4069 func releasep() *p {
   4070 	_g_ := getg()
   4071 
   4072 	if _g_.m.p == 0 || _g_.m.mcache == nil {
   4073 		throw("releasep: invalid arg")
   4074 	}
   4075 	_p_ := _g_.m.p.ptr()
   4076 	if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
   4077 		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
   4078 		throw("releasep: invalid p state")
   4079 	}
   4080 	if trace.enabled {
   4081 		traceProcStop(_g_.m.p.ptr())
   4082 	}
   4083 	_g_.m.p = 0
   4084 	_g_.m.mcache = nil
   4085 	_p_.m = 0
   4086 	_p_.status = _Pidle
   4087 	return _p_
   4088 }
   4089 
   4090 func incidlelocked(v int32) {
   4091 	lock(&sched.lock)
   4092 	sched.nmidlelocked += v
   4093 	if v > 0 {
   4094 		checkdead()
   4095 	}
   4096 	unlock(&sched.lock)
   4097 }
   4098 
   4099 // Check for deadlock situation.
    4100 // The check is based on the number of running M's; if it is 0, we have a deadlock.
   4101 // sched.lock must be held.
   4102 func checkdead() {
   4103 	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
   4104 	// there are no running goroutines. The calling program is
   4105 	// assumed to be running.
   4106 	if islibrary || isarchive {
   4107 		return
   4108 	}
   4109 
   4110 	// If we are dying because of a signal caught on an already idle thread,
   4111 	// freezetheworld will cause all running threads to block.
   4112 	// And runtime will essentially enter into deadlock state,
   4113 	// except that there is a thread that will call exit soon.
   4114 	if panicking > 0 {
   4115 		return
   4116 	}
   4117 
   4118 	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
   4119 	if run > 0 {
   4120 		return
   4121 	}
   4122 	if run < 0 {
   4123 		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
   4124 		throw("checkdead: inconsistent counts")
   4125 	}
   4126 
   4127 	grunning := 0
   4128 	lock(&allglock)
   4129 	for i := 0; i < len(allgs); i++ {
   4130 		gp := allgs[i]
   4131 		if isSystemGoroutine(gp) {
   4132 			continue
   4133 		}
   4134 		s := readgstatus(gp)
   4135 		switch s &^ _Gscan {
   4136 		case _Gwaiting:
   4137 			grunning++
   4138 		case _Grunnable,
   4139 			_Grunning,
   4140 			_Gsyscall:
   4141 			unlock(&allglock)
   4142 			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
   4143 			throw("checkdead: runnable g")
   4144 		}
   4145 	}
   4146 	unlock(&allglock)
    4147 	if grunning == 0 { // possible if main goroutine calls runtime.Goexit()
   4148 		throw("no goroutines (main called runtime.Goexit) - deadlock!")
   4149 	}
   4150 
   4151 	// Maybe jump time forward for playground.
   4152 	gp := timejump()
   4153 	if gp != nil {
   4154 		casgstatus(gp, _Gwaiting, _Grunnable)
   4155 		globrunqput(gp)
   4156 		_p_ := pidleget()
   4157 		if _p_ == nil {
   4158 			throw("checkdead: no p for timer")
   4159 		}
   4160 		mp := mget()
   4161 		if mp == nil {
   4162 			// There should always be a free M since
   4163 			// nothing is running.
   4164 			throw("checkdead: no m for timer")
   4165 		}
   4166 		mp.nextp.set(_p_)
   4167 		notewakeup(&mp.park)
   4168 		return
   4169 	}
   4170 
   4171 	getg().m.throwing = -1 // do not dump full stacks
   4172 	throw("all goroutines are asleep - deadlock!")
   4173 }
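
// Illustrative example: checkdead is what produces the familiar fatal error
// when every (non-system) goroutine is blocked, e.g.
//
//	func main() {
//		ch := make(chan int)
//		<-ch // nothing can ever send
//	}
//
// which dies with "fatal error: all goroutines are asleep - deadlock!". As
// the code above shows, c-archive/c-shared builds are exempt, and the
// playground's timejump can rescue a program that is only waiting on timers.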
   4174 
   4175 // forcegcperiod is the maximum time in nanoseconds between garbage
   4176 // collections. If we go this long without a garbage collection, one
   4177 // is forced to run.
   4178 //
   4179 // This is a variable for testing purposes. It normally doesn't change.
   4180 var forcegcperiod int64 = 2 * 60 * 1e9
   4181 
   4182 // Always runs without a P, so write barriers are not allowed.
   4183 //
   4184 //go:nowritebarrierrec
   4185 func sysmon() {
   4186 	lock(&sched.lock)
   4187 	sched.nmsys++
   4188 	checkdead()
   4189 	unlock(&sched.lock)
   4190 
   4191 	// If a heap span goes unused for 5 minutes after a garbage collection,
   4192 	// we hand it back to the operating system.
   4193 	scavengelimit := int64(5 * 60 * 1e9)
   4194 
   4195 	if debug.scavenge > 0 {
   4196 		// Scavenge-a-lot for testing.
   4197 		forcegcperiod = 10 * 1e6
   4198 		scavengelimit = 20 * 1e6
   4199 	}
   4200 
   4201 	lastscavenge := nanotime()
   4202 	nscavenge := 0
   4203 
   4204 	lasttrace := int64(0)
    4205 	idle := 0 // how many cycles in succession we have not woken anybody up
   4206 	delay := uint32(0)
   4207 	for {
   4208 		if idle == 0 { // start with 20us sleep...
   4209 			delay = 20
   4210 		} else if idle > 50 { // start doubling the sleep after 1ms...
   4211 			delay *= 2
   4212 		}
   4213 		if delay > 10*1000 { // up to 10ms
   4214 			delay = 10 * 1000
   4215 		}
   4216 		usleep(delay)
   4217 		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
   4218 			lock(&sched.lock)
   4219 			if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
   4220 				atomic.Store(&sched.sysmonwait, 1)
   4221 				unlock(&sched.lock)
   4222 				// Make wake-up period small enough
   4223 				// for the sampling to be correct.
   4224 				maxsleep := forcegcperiod / 2
   4225 				if scavengelimit < forcegcperiod {
   4226 					maxsleep = scavengelimit / 2
   4227 				}
   4228 				shouldRelax := true
   4229 				if osRelaxMinNS > 0 {
   4230 					next := timeSleepUntil()
   4231 					now := nanotime()
   4232 					if next-now < osRelaxMinNS {
   4233 						shouldRelax = false
   4234 					}
   4235 				}
   4236 				if shouldRelax {
   4237 					osRelax(true)
   4238 				}
   4239 				notetsleep(&sched.sysmonnote, maxsleep)
   4240 				if shouldRelax {
   4241 					osRelax(false)
   4242 				}
   4243 				lock(&sched.lock)
   4244 				atomic.Store(&sched.sysmonwait, 0)
   4245 				noteclear(&sched.sysmonnote)
   4246 				idle = 0
   4247 				delay = 20
   4248 			}
   4249 			unlock(&sched.lock)
   4250 		}
   4251 		// trigger libc interceptors if needed
   4252 		if *cgo_yield != nil {
   4253 			asmcgocall(*cgo_yield, nil)
   4254 		}
   4255 		// poll network if not polled for more than 10ms
   4256 		lastpoll := int64(atomic.Load64(&sched.lastpoll))
   4257 		now := nanotime()
   4258 		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
   4259 			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
   4260 			gp := netpoll(false) // non-blocking - returns list of goroutines
   4261 			if gp != nil {
   4262 				// Need to decrement number of idle locked M's
   4263 				// (pretending that one more is running) before injectglist.
   4264 				// Otherwise it can lead to the following situation:
   4265 				// injectglist grabs all P's but before it starts M's to run the P's,
   4266 				// another M returns from syscall, finishes running its G,
   4267 				// observes that there is no work to do and no other running M's
   4268 				// and reports deadlock.
   4269 				incidlelocked(-1)
   4270 				injectglist(gp)
   4271 				incidlelocked(1)
   4272 			}
   4273 		}
   4274 		// retake P's blocked in syscalls
   4275 		// and preempt long running G's
   4276 		if retake(now) != 0 {
   4277 			idle = 0
   4278 		} else {
   4279 			idle++
   4280 		}
   4281 		// check if we need to force a GC
   4282 		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
   4283 			lock(&forcegc.lock)
   4284 			forcegc.idle = 0
   4285 			forcegc.g.schedlink = 0
   4286 			injectglist(forcegc.g)
   4287 			unlock(&forcegc.lock)
   4288 		}
   4289 		// scavenge heap once in a while
   4290 		if lastscavenge+scavengelimit/2 < now {
   4291 			mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
   4292 			lastscavenge = now
   4293 			nscavenge++
   4294 		}
   4295 		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
   4296 			lasttrace = now
   4297 			schedtrace(debug.scheddetail > 0)
   4298 		}
   4299 	}
   4300 }
   4301 
   4302 type sysmontick struct {
   4303 	schedtick   uint32
   4304 	schedwhen   int64
   4305 	syscalltick uint32
   4306 	syscallwhen int64
   4307 }
   4308 
   4309 // forcePreemptNS is the time slice given to a G before it is
   4310 // preempted.
   4311 const forcePreemptNS = 10 * 1000 * 1000 // 10ms
   4312 
   4313 func retake(now int64) uint32 {
   4314 	n := 0
   4315 	// Prevent allp slice changes. This lock will be completely
   4316 	// uncontended unless we're already stopping the world.
   4317 	lock(&allpLock)
   4318 	// We can't use a range loop over allp because we may
   4319 	// temporarily drop the allpLock. Hence, we need to re-fetch
   4320 	// allp each time around the loop.
   4321 	for i := 0; i < len(allp); i++ {
   4322 		_p_ := allp[i]
   4323 		if _p_ == nil {
   4324 			// This can happen if procresize has grown
   4325 			// allp but not yet created new Ps.
   4326 			continue
   4327 		}
   4328 		pd := &_p_.sysmontick
   4329 		s := _p_.status
   4330 		if s == _Psyscall {
   4331 			// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
   4332 			t := int64(_p_.syscalltick)
   4333 			if int64(pd.syscalltick) != t {
   4334 				pd.syscalltick = uint32(t)
   4335 				pd.syscallwhen = now
   4336 				continue
   4337 			}
   4338 			// On the one hand we don't want to retake Ps if there is no other work to do,
   4339 			// but on the other hand we want to retake them eventually
    4340 			// because they can prevent the sysmon thread from going into deep sleep.
   4341 			if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
   4342 				continue
   4343 			}
   4344 			// Drop allpLock so we can take sched.lock.
   4345 			unlock(&allpLock)
   4346 			// Need to decrement number of idle locked M's
   4347 			// (pretending that one more is running) before the CAS.
   4348 			// Otherwise the M from which we retake can exit the syscall,
   4349 			// increment nmidle and report deadlock.
   4350 			incidlelocked(-1)
   4351 			if atomic.Cas(&_p_.status, s, _Pidle) {
   4352 				if trace.enabled {
   4353 					traceGoSysBlock(_p_)
   4354 					traceProcStop(_p_)
   4355 				}
   4356 				n++
   4357 				_p_.syscalltick++
   4358 				handoffp(_p_)
   4359 			}
   4360 			incidlelocked(1)
   4361 			lock(&allpLock)
   4362 		} else if s == _Prunning {
   4363 			// Preempt G if it's running for too long.
   4364 			t := int64(_p_.schedtick)
   4365 			if int64(pd.schedtick) != t {
   4366 				pd.schedtick = uint32(t)
   4367 				pd.schedwhen = now
   4368 				continue
   4369 			}
   4370 			if pd.schedwhen+forcePreemptNS > now {
   4371 				continue
   4372 			}
   4373 			preemptone(_p_)
   4374 		}
   4375 	}
   4376 	unlock(&allpLock)
   4377 	return uint32(n)
   4378 }
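
// Illustrative note: in this version of the runtime preemption is cooperative.
// retake/preemptone only set gp.preempt and poison stackguard0; the request is
// observed at the stack-growth check in function prologues. A loop that makes
// no function calls, e.g.
//
//	for {
//		counter++ // no calls, so the preemption request is never checked
//	}
//
// can hold its P well past forcePreemptNS, whereas the same loop with a
// function call in its body is preempted promptly.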
   4379 
   4380 // Tell all goroutines that they have been preempted and they should stop.
   4381 // This function is purely best-effort. It can fail to inform a goroutine if a
   4382 // processor just started running it.
   4383 // No locks need to be held.
   4384 // Returns true if preemption request was issued to at least one goroutine.
   4385 func preemptall() bool {
   4386 	res := false
   4387 	for _, _p_ := range allp {
   4388 		if _p_.status != _Prunning {
   4389 			continue
   4390 		}
   4391 		if preemptone(_p_) {
   4392 			res = true
   4393 		}
   4394 	}
   4395 	return res
   4396 }
   4397 
   4398 // Tell the goroutine running on processor P to stop.
   4399 // This function is purely best-effort. It can incorrectly fail to inform the
    4400 // goroutine. It can inform the wrong goroutine. Even if it informs the
   4401 // correct goroutine, that goroutine might ignore the request if it is
   4402 // simultaneously executing newstack.
   4403 // No lock needs to be held.
   4404 // Returns true if preemption request was issued.
   4405 // The actual preemption will happen at some point in the future
    4406 // and will be indicated by the gp->status no longer being
    4407 // Grunning.
   4408 func preemptone(_p_ *p) bool {
   4409 	mp := _p_.m.ptr()
   4410 	if mp == nil || mp == getg().m {
   4411 		return false
   4412 	}
   4413 	gp := mp.curg
   4414 	if gp == nil || gp == mp.g0 {
   4415 		return false
   4416 	}
   4417 
   4418 	gp.preempt = true
   4419 
    4420 	// Every call in a goroutine checks for stack overflow by
   4421 	// comparing the current stack pointer to gp->stackguard0.
   4422 	// Setting gp->stackguard0 to StackPreempt folds
   4423 	// preemption into the normal stack overflow check.
   4424 	gp.stackguard0 = stackPreempt
   4425 	return true
   4426 }
   4427 
   4428 var starttime int64
   4429 
   4430 func schedtrace(detailed bool) {
   4431 	now := nanotime()
   4432 	if starttime == 0 {
   4433 		starttime = now
   4434 	}
   4435 
   4436 	lock(&sched.lock)
   4437 	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
   4438 	if detailed {
   4439 		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
   4440 	}
   4441 	// We must be careful while reading data from P's, M's and G's.
   4442 	// Even if we hold schedlock, most data can be changed concurrently.
   4443 	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
   4444 	for i, _p_ := range allp {
   4445 		mp := _p_.m.ptr()
   4446 		h := atomic.Load(&_p_.runqhead)
   4447 		t := atomic.Load(&_p_.runqtail)
   4448 		if detailed {
   4449 			id := int64(-1)
   4450 			if mp != nil {
   4451 				id = mp.id
   4452 			}
   4453 			print("  P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
   4454 		} else {
   4455 			// In non-detailed mode format lengths of per-P run queues as:
   4456 			// [len1 len2 len3 len4]
   4457 			print(" ")
   4458 			if i == 0 {
   4459 				print("[")
   4460 			}
   4461 			print(t - h)
   4462 			if i == len(allp)-1 {
   4463 				print("]\n")
   4464 			}
   4465 		}
   4466 	}
   4467 
   4468 	if !detailed {
   4469 		unlock(&sched.lock)
   4470 		return
   4471 	}
   4472 
   4473 	for mp := allm; mp != nil; mp = mp.alllink {
   4474 		_p_ := mp.p.ptr()
   4475 		gp := mp.curg
   4476 		lockedg := mp.lockedg.ptr()
   4477 		id1 := int32(-1)
   4478 		if _p_ != nil {
   4479 			id1 = _p_.id
   4480 		}
   4481 		id2 := int64(-1)
   4482 		if gp != nil {
   4483 			id2 = gp.goid
   4484 		}
   4485 		id3 := int64(-1)
   4486 		if lockedg != nil {
   4487 			id3 = lockedg.goid
   4488 		}
   4489 		print("  M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
   4490 	}
   4491 
   4492 	lock(&allglock)
   4493 	for gi := 0; gi < len(allgs); gi++ {
   4494 		gp := allgs[gi]
   4495 		mp := gp.m
   4496 		lockedm := gp.lockedm.ptr()
   4497 		id1 := int64(-1)
   4498 		if mp != nil {
   4499 			id1 = mp.id
   4500 		}
   4501 		id2 := int64(-1)
   4502 		if lockedm != nil {
   4503 			id2 = lockedm.id
   4504 		}
   4505 		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
   4506 	}
   4507 	unlock(&allglock)
   4508 	unlock(&sched.lock)
   4509 }
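
// Usage sketch: schedtrace output is enabled via the GODEBUG environment
// variable, e.g.
//
//	GODEBUG=schedtrace=1000 ./prog               // one summary line per second
//	GODEBUG=schedtrace=1000,scheddetail=1 ./prog // plus per-P, per-M and per-G detail
//
// producing lines of the form (values here are illustrative)
//
//	SCHED 2013ms: gomaxprocs=4 idleprocs=3 threads=8 spinningthreads=0 idlethreads=4 runqueue=0 [0 1 0 0]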
   4510 
   4511 // Put mp on midle list.
   4512 // Sched must be locked.
   4513 // May run during STW, so write barriers are not allowed.
   4514 //go:nowritebarrierrec
   4515 func mput(mp *m) {
   4516 	mp.schedlink = sched.midle
   4517 	sched.midle.set(mp)
   4518 	sched.nmidle++
   4519 	checkdead()
   4520 }
   4521 
   4522 // Try to get an m from midle list.
   4523 // Sched must be locked.
   4524 // May run during STW, so write barriers are not allowed.
   4525 //go:nowritebarrierrec
   4526 func mget() *m {
   4527 	mp := sched.midle.ptr()
   4528 	if mp != nil {
   4529 		sched.midle = mp.schedlink
   4530 		sched.nmidle--
   4531 	}
   4532 	return mp
   4533 }
   4534 
   4535 // Put gp on the global runnable queue.
   4536 // Sched must be locked.
   4537 // May run during STW, so write barriers are not allowed.
   4538 //go:nowritebarrierrec
   4539 func globrunqput(gp *g) {
   4540 	gp.schedlink = 0
   4541 	if sched.runqtail != 0 {
   4542 		sched.runqtail.ptr().schedlink.set(gp)
   4543 	} else {
   4544 		sched.runqhead.set(gp)
   4545 	}
   4546 	sched.runqtail.set(gp)
   4547 	sched.runqsize++
   4548 }
   4549 
   4550 // Put gp at the head of the global runnable queue.
   4551 // Sched must be locked.
   4552 // May run during STW, so write barriers are not allowed.
   4553 //go:nowritebarrierrec
   4554 func globrunqputhead(gp *g) {
   4555 	gp.schedlink = sched.runqhead
   4556 	sched.runqhead.set(gp)
   4557 	if sched.runqtail == 0 {
   4558 		sched.runqtail.set(gp)
   4559 	}
   4560 	sched.runqsize++
   4561 }
   4562 
   4563 // Put a batch of runnable goroutines on the global runnable queue.
   4564 // Sched must be locked.
   4565 func globrunqputbatch(ghead *g, gtail *g, n int32) {
   4566 	gtail.schedlink = 0
   4567 	if sched.runqtail != 0 {
   4568 		sched.runqtail.ptr().schedlink.set(ghead)
   4569 	} else {
   4570 		sched.runqhead.set(ghead)
   4571 	}
   4572 	sched.runqtail.set(gtail)
   4573 	sched.runqsize += n
   4574 }
   4575 
    4576 // Try to get a batch of G's from the global runnable queue.
   4577 // Sched must be locked.
   4578 func globrunqget(_p_ *p, max int32) *g {
   4579 	if sched.runqsize == 0 {
   4580 		return nil
   4581 	}
   4582 
   4583 	n := sched.runqsize/gomaxprocs + 1
   4584 	if n > sched.runqsize {
   4585 		n = sched.runqsize
   4586 	}
   4587 	if max > 0 && n > max {
   4588 		n = max
   4589 	}
   4590 	if n > int32(len(_p_.runq))/2 {
   4591 		n = int32(len(_p_.runq)) / 2
   4592 	}
   4593 
   4594 	sched.runqsize -= n
   4595 	if sched.runqsize == 0 {
   4596 		sched.runqtail = 0
   4597 	}
   4598 
   4599 	gp := sched.runqhead.ptr()
   4600 	sched.runqhead = gp.schedlink
   4601 	n--
   4602 	for ; n > 0; n-- {
   4603 		gp1 := sched.runqhead.ptr()
   4604 		sched.runqhead = gp1.schedlink
   4605 		runqput(_p_, gp1, false)
   4606 	}
   4607 	return gp
   4608 }
   4609 
    4610 // Put p on the _Pidle list.
   4611 // Sched must be locked.
   4612 // May run during STW, so write barriers are not allowed.
   4613 //go:nowritebarrierrec
   4614 func pidleput(_p_ *p) {
   4615 	if !runqempty(_p_) {
   4616 		throw("pidleput: P has non-empty run queue")
   4617 	}
   4618 	_p_.link = sched.pidle
   4619 	sched.pidle.set(_p_)
   4620 	atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
   4621 }
   4622 
    4623 // Try to get a p from the _Pidle list.
   4624 // Sched must be locked.
   4625 // May run during STW, so write barriers are not allowed.
   4626 //go:nowritebarrierrec
   4627 func pidleget() *p {
   4628 	_p_ := sched.pidle.ptr()
   4629 	if _p_ != nil {
   4630 		sched.pidle = _p_.link
   4631 		atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
   4632 	}
   4633 	return _p_
   4634 }
   4635 
   4636 // runqempty returns true if _p_ has no Gs on its local run queue.
   4637 // It never returns true spuriously.
   4638 func runqempty(_p_ *p) bool {
    4639 	// Defend against a race where 1) _p_ has G1 in runnext but runqhead == runqtail,
   4640 	// 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
   4641 	// Simply observing that runqhead == runqtail and then observing that runqnext == nil
   4642 	// does not mean the queue is empty.
   4643 	for {
   4644 		head := atomic.Load(&_p_.runqhead)
   4645 		tail := atomic.Load(&_p_.runqtail)
   4646 		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
   4647 		if tail == atomic.Load(&_p_.runqtail) {
   4648 			return head == tail && runnext == 0
   4649 		}
   4650 	}
   4651 }
   4652 
   4653 // To shake out latent assumptions about scheduling order,
   4654 // we introduce some randomness into scheduling decisions
   4655 // when running with the race detector.
   4656 // The need for this was made obvious by changing the
   4657 // (deterministic) scheduling order in Go 1.5 and breaking
   4658 // many poorly-written tests.
   4659 // With the randomness here, as long as the tests pass
   4660 // consistently with -race, they shouldn't have latent scheduling
   4661 // assumptions.
   4662 const randomizeScheduler = raceenabled
   4663 
   4664 // runqput tries to put g on the local runnable queue.
    4665 // If next is false, runqput adds g to the tail of the runnable queue.
   4666 // If next is true, runqput puts g in the _p_.runnext slot.
   4667 // If the run queue is full, runqput puts g on the global queue.
   4668 // Executed only by the owner P.
   4669 func runqput(_p_ *p, gp *g, next bool) {
   4670 	if randomizeScheduler && next && fastrand()%2 == 0 {
   4671 		next = false
   4672 	}
   4673 
   4674 	if next {
   4675 	retryNext:
   4676 		oldnext := _p_.runnext
   4677 		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
   4678 			goto retryNext
   4679 		}
   4680 		if oldnext == 0 {
   4681 			return
   4682 		}
   4683 		// Kick the old runnext out to the regular run queue.
   4684 		gp = oldnext.ptr()
   4685 	}
   4686 
   4687 retry:
   4688 	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
   4689 	t := _p_.runqtail
   4690 	if t-h < uint32(len(_p_.runq)) {
   4691 		_p_.runq[t%uint32(len(_p_.runq))].set(gp)
   4692 		atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
   4693 		return
   4694 	}
   4695 	if runqputslow(_p_, gp, h, t) {
   4696 		return
   4697 	}
   4698 	// the queue is not full, now the put above must succeed
   4699 	goto retry
   4700 }
   4701 
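// A worked example of the runnext handling above (ignoring the race-detector
// randomization, with illustrative goroutine names): starting from an empty P,
// runqput(_p_, G1, true) parks G1 in _p_.runnext; a later runqput(_p_, G2, true)
// installs G2 in runnext and kicks G1 out to the tail of the local ring buffer,
// so runqget returns G2 first (with inheritTime == true) and G1 on the next call.
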
   4702 // Put g and a batch of work from the local runnable queue on the global queue.
   4703 // Executed only by the owner P.
   4704 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
   4705 	var batch [len(_p_.runq)/2 + 1]*g
   4706 
   4707 	// First, grab a batch from local queue.
   4708 	n := t - h
   4709 	n = n / 2
   4710 	if n != uint32(len(_p_.runq)/2) {
   4711 		throw("runqputslow: queue is not full")
   4712 	}
   4713 	for i := uint32(0); i < n; i++ {
   4714 		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
   4715 	}
   4716 	if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
   4717 		return false
   4718 	}
   4719 	batch[n] = gp
   4720 
   4721 	if randomizeScheduler {
   4722 		for i := uint32(1); i <= n; i++ {
   4723 			j := fastrandn(i + 1)
   4724 			batch[i], batch[j] = batch[j], batch[i]
   4725 		}
   4726 	}
   4727 
   4728 	// Link the goroutines.
   4729 	for i := uint32(0); i < n; i++ {
   4730 		batch[i].schedlink.set(batch[i+1])
   4731 	}
   4732 
   4733 	// Now put the batch on global queue.
   4734 	lock(&sched.lock)
   4735 	globrunqputbatch(batch[0], batch[n], int32(n+1))
   4736 	unlock(&sched.lock)
   4737 	return true
   4738 }
   4739 
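// The randomizeScheduler loop in runqputslow is the forward variant of the
// Fisher-Yates shuffle: at step i the element batch[i] is swapped with a
// uniformly chosen batch[j] for j <= i, so every ordering of the n+1 queued
// goroutines is equally likely before they are linked and handed to the
// global queue.
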
   4740 // Get g from local runnable queue.
   4741 // If inheritTime is true, gp should inherit the remaining time in the
   4742 // current time slice. Otherwise, it should start a new time slice.
   4743 // Executed only by the owner P.
   4744 func runqget(_p_ *p) (gp *g, inheritTime bool) {
   4745 	// If there's a runnext, it's the next G to run.
   4746 	for {
   4747 		next := _p_.runnext
   4748 		if next == 0 {
   4749 			break
   4750 		}
   4751 		if _p_.runnext.cas(next, 0) {
   4752 			return next.ptr(), true
   4753 		}
   4754 	}
   4755 
   4756 	for {
   4757 		h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
   4758 		t := _p_.runqtail
   4759 		if t == h {
   4760 			return nil, false
   4761 		}
   4762 		gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
   4763 		if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
   4764 			return gp, false
   4765 		}
   4766 	}
   4767 }
   4768 
   4769 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
   4770 // Batch is a ring buffer starting at batchHead.
   4771 // Returns the number of grabbed goroutines.
   4772 // Can be executed by any P.
   4773 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
   4774 	for {
   4775 		h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
   4776 		t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
   4777 		n := t - h
   4778 		n = n - n/2
   4779 		if n == 0 {
   4780 			if stealRunNextG {
   4781 				// Try to steal from _p_.runnext.
   4782 				if next := _p_.runnext; next != 0 {
   4783 					if _p_.status == _Prunning {
   4784 						// Sleep to ensure that _p_ isn't about to run the g
   4785 						// we are about to steal.
   4786 						// The important use case here is when the g running
   4787 						// on _p_ ready()s another g and then almost
   4788 						// immediately blocks. Instead of stealing runnext
   4789 						// in this window, back off to give _p_ a chance to
   4790 						// schedule runnext. This will avoid thrashing gs
   4791 						// between different Ps.
   4792 						// A sync chan send/recv takes ~50ns as of time of
   4793 						// writing, so 3us gives ~50x overshoot.
   4794 						if GOOS != "windows" {
   4795 							usleep(3)
   4796 						} else {
   4797 							// On Windows the system timer granularity is
   4798 							// 1-15ms, which is far too coarse for this
   4799 							// optimization, so just yield.
   4800 							osyield()
   4801 						}
   4802 					}
   4803 					if !_p_.runnext.cas(next, 0) {
   4804 						continue
   4805 					}
   4806 					batch[batchHead%uint32(len(batch))] = next
   4807 					return 1
   4808 				}
   4809 			}
   4810 			return 0
   4811 		}
   4812 		if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
   4813 			continue
   4814 		}
   4815 		for i := uint32(0); i < n; i++ {
   4816 			g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
   4817 			batch[(batchHead+i)%uint32(len(batch))] = g
   4818 		}
   4819 		if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
   4820 			return n
   4821 		}
   4822 	}
   4823 }
   4824 
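// The "n = n - n/2" step in runqgrab rounds up, so the thief takes the larger
// half of the victim's queue: for t-h == 5 it grabs 3, and for t-h == 1 it grabs
// the lone queued goroutine. The follow-up n > len(_p_.runq)/2 check then rejects
// values that can only arise from reading h and t across a concurrent update.
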
   4825 // Steal half of the elements from the local runnable queue of p2
   4826 // and put them onto the local runnable queue of _p_.
   4827 // Returns one of the stolen elements (or nil if the steal failed).
   4828 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
   4829 	t := _p_.runqtail
   4830 	n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
   4831 	if n == 0 {
   4832 		return nil
   4833 	}
   4834 	n--
   4835 	gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
   4836 	if n == 0 {
   4837 		return gp
   4838 	}
   4839 	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
   4840 	if t-h+n >= uint32(len(_p_.runq)) {
   4841 		throw("runqsteal: runq overflow")
   4842 	}
   4843 	atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
   4844 	return gp
   4845 }
   4846 
   4847 //go:linkname setMaxThreads runtime/debug.setMaxThreads
   4848 func setMaxThreads(in int) (out int) {
   4849 	lock(&sched.lock)
   4850 	out = int(sched.maxmcount)
   4851 	if in > 0x7fffffff { // MaxInt32
   4852 		sched.maxmcount = 0x7fffffff
   4853 	} else {
   4854 		sched.maxmcount = int32(in)
   4855 	}
   4856 	checkmcount()
   4857 	unlock(&sched.lock)
   4858 	return
   4859 }
   4860 
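// setMaxThreads is the runtime half of runtime/debug.SetMaxThreads, wired up by
// the go:linkname directive above. A minimal caller-side sketch (a separate
// program, not part of this package; 20000 is just an example limit):
//
//	package main
//
//	import "runtime/debug"
//
//	func main() {
//		prev := debug.SetMaxThreads(20000) // returns the previous limit
//		_ = prev
//	}
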
   4861 func haveexperiment(name string) bool {
   4862 	if name == "framepointer" {
   4863 		return framepointer_enabled // set by linker
   4864 	}
   4865 	x := sys.Goexperiment
   4866 	for x != "" {
   4867 		xname := ""
   4868 		i := index(x, ",")
   4869 		if i < 0 {
   4870 			xname, x = x, ""
   4871 		} else {
   4872 			xname, x = x[:i], x[i+1:]
   4873 		}
   4874 		if xname == name {
   4875 			return true
   4876 		}
   4877 		if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
   4878 			return false
   4879 		}
   4880 	}
   4881 	return false
   4882 }
   4883 
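// Illustrative GOEXPERIMENT parsing (the experiment names here are made up):
// with GOEXPERIMENT=foo,nobar baked in at build time, haveexperiment("foo")
// reports true, haveexperiment("bar") reports false because of the "no" prefix,
// and haveexperiment("baz") reports false because it never appears. The
// "framepointer" case is answered by the linker-set framepointer_enabled flag
// before the experiment string is consulted at all.
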
   4884 //go:nosplit
   4885 func procPin() int {
   4886 	_g_ := getg()
   4887 	mp := _g_.m
   4888 
   4889 	mp.locks++
   4890 	return int(mp.p.ptr().id)
   4891 }
   4892 
   4893 //go:nosplit
   4894 func procUnpin() {
   4895 	_g_ := getg()
   4896 	_g_.m.locks--
   4897 }
   4898 
   4899 //go:linkname sync_runtime_procPin sync.runtime_procPin
   4900 //go:nosplit
   4901 func sync_runtime_procPin() int {
   4902 	return procPin()
   4903 }
   4904 
   4905 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
   4906 //go:nosplit
   4907 func sync_runtime_procUnpin() {
   4908 	procUnpin()
   4909 }
   4910 
   4911 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
   4912 //go:nosplit
   4913 func sync_atomic_runtime_procPin() int {
   4914 	return procPin()
   4915 }
   4916 
   4917 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
   4918 //go:nosplit
   4919 func sync_atomic_runtime_procUnpin() {
   4920 	procUnpin()
   4921 }
   4922 
   4923 // Active spinning for sync.Mutex.
   4924 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
   4925 //go:nosplit
   4926 func sync_runtime_canSpin(i int) bool {
   4927 	// sync.Mutex is cooperative, so we are conservative with spinning.
   4928 	// Spin only a few times and only if running on a multicore machine and
   4929 	// GOMAXPROCS>1 and there is at least one other running P and the local runq is empty.
   4930 	// As opposed to runtime mutex we don't do passive spinning here,
   4931 	// because there can be work on the global runq or on other Ps.
   4932 	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
   4933 		return false
   4934 	}
   4935 	if p := getg().m.p.ptr(); !runqempty(p) {
   4936 		return false
   4937 	}
   4938 	return true
   4939 }
   4940 
   4941 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
   4942 //go:nosplit
   4943 func sync_runtime_doSpin() {
   4944 	procyield(active_spin_cnt)
   4945 }
   4946 
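// The two functions above surface (via their linknames) as runtime_canSpin and
// runtime_doSpin inside package sync. A rough sketch of the calling pattern in a
// mutex lock slow path (simplified; stillContended is a hypothetical helper, not
// the actual sync.Mutex code):
//
//	iter := 0
//	for stillContended() && sync_runtime_canSpin(iter) {
//		sync_runtime_doSpin() // procyield(active_spin_cnt)
//		iter++
//	}
//	// give up spinning and block on a semaphore instead
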
   4947 var stealOrder randomOrder
   4948 
   4949 // randomOrder/randomEnum are helper types for randomized work stealing.
   4950 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
   4951 // The algorithm is based on the fact that if X and GOMAXPROCS are coprime, then the
   4952 // sequence (i + X) % GOMAXPROCS visits every value exactly once; see the worked example after gcd below.
   4953 type randomOrder struct {
   4954 	count    uint32
   4955 	coprimes []uint32
   4956 }
   4957 
   4958 type randomEnum struct {
   4959 	i     uint32
   4960 	count uint32
   4961 	pos   uint32
   4962 	inc   uint32
   4963 }
   4964 
   4965 func (ord *randomOrder) reset(count uint32) {
   4966 	ord.count = count
   4967 	ord.coprimes = ord.coprimes[:0]
   4968 	for i := uint32(1); i <= count; i++ {
   4969 		if gcd(i, count) == 1 {
   4970 			ord.coprimes = append(ord.coprimes, i)
   4971 		}
   4972 	}
   4973 }
   4974 
   4975 func (ord *randomOrder) start(i uint32) randomEnum {
   4976 	return randomEnum{
   4977 		count: ord.count,
   4978 		pos:   i % ord.count,
   4979 		inc:   ord.coprimes[i%uint32(len(ord.coprimes))],
   4980 	}
   4981 }
   4982 
   4983 func (enum *randomEnum) done() bool {
   4984 	return enum.i == enum.count
   4985 }
   4986 
   4987 func (enum *randomEnum) next() {
   4988 	enum.i++
   4989 	enum.pos = (enum.pos + enum.inc) % enum.count
   4990 }
   4991 
   4992 func (enum *randomEnum) position() uint32 {
   4993 	return enum.pos
   4994 }
   4995 
   4996 func gcd(a, b uint32) uint32 {
   4997 	for b != 0 {
   4998 		a, b = b, a%b
   4999 	}
   5000 	return a
   5001 }
   5002
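// A worked example of the randomized enumeration (illustrative numbers): with
// count == 6, reset finds coprimes == {1, 5}. start(3) then picks pos == 3 and
// inc == coprimes[3%2] == 5, so successive position() values are 3, 2, 1, 0, 5, 4:
// each of the six slots exactly once before done() reports true.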