Lines Matching full:sched
53 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
60 // If there is at least one spinning thread (sched.nmspinning>1), we don't unpark
71 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
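The design comments matched above (lines 53-71) describe the wakeup protocol: a thread that submits new work publishes it to a run queue, executes a full (#StoreLoad-style) barrier, and only then checks sched.nmspinning; a spinning thread, symmetrically, drops out of the spinning state before its last check of the queues, so either the submitter sees a spinner or a spinner sees the work. The following is a minimal standalone model of that ordering, not the runtime's code; nmspinning, submit, and spinner are illustrative names, and a buffered channel stands in for the run queue.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    var (
        nmspinning int32               // models sched.nmspinning
        runq       = make(chan int, 8) // models a run queue
    )

    // submit publishes work and then checks for spinners. The atomic load
    // after the send stands in for the #StoreLoad-style barrier the
    // runtime comment describes.
    func submit(v int) {
        runq <- v
        if atomic.LoadInt32(&nmspinning) == 0 {
            fmt.Println("no spinners: would unpark an idle thread here (wakep)")
        }
    }

    // spinner models the consumer side: it leaves the spinning state
    // first, then re-checks the queue one last time before parking.
    func spinner() (int, bool) {
        atomic.AddInt32(&nmspinning, -1)
        select {
        case v := <-runq:
            return v, true
        default:
            return 0, false // would park; a later submit now sees nmspinning==0
        }
    }

    func main() {
        atomic.AddInt32(&nmspinning, 1)
        submit(42)
        fmt.Println(spinner())
    }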
319 lock(&sched.sudoglock)
321 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
322 s := sched.sudogcache
323 sched.sudogcache = s.next
327 unlock(&sched.sudoglock)
385 lock(&sched.sudoglock)
386 last.next = sched.sudogcache
387 sched.sudogcache = first
388 unlock(&sched.sudoglock)
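The sudog lines above (319-327 and 385-388) show a two-level free list: a per-P cache that is refilled to half its capacity from a central list guarded by sched.sudoglock, and later flushed back as one pre-linked batch under a single lock acquisition. Below is a simplified standalone sketch of that pattern; node, centralFree, and the half-capacity refill are modeled on the matched lines, not copied from the runtime.

    package main

    import (
        "fmt"
        "sync"
    )

    // node stands in for a sudog: an object strung onto an intrusive free list.
    type node struct{ next *node }

    var (
        centralMu   sync.Mutex // models sched.sudoglock
        centralFree *node      // models sched.sudogcache
    )

    // acquire takes from the per-P cache, refilling it to half capacity
    // from the central list first, and only then allocates a new object.
    func acquire(local *[]*node) *node {
        if len(*local) == 0 {
            centralMu.Lock()
            for len(*local) < cap(*local)/2 && centralFree != nil {
                n := centralFree
                centralFree = n.next
                n.next = nil
                *local = append(*local, n)
            }
            centralMu.Unlock()
        }
        if last := len(*local) - 1; last >= 0 {
            n := (*local)[last]
            *local = (*local)[:last]
            return n
        }
        return new(node)
    }

    // releaseBatch hands a pre-linked chain back to the central list in
    // one locked operation, as lines 385-388 do.
    func releaseBatch(first, last *node) {
        centralMu.Lock()
        last.next = centralFree
        centralFree = first
        centralMu.Unlock()
    }

    func main() {
        local := make([]*node, 0, 8)
        a, b := acquire(&local), acquire(&local)
        a.next = b // link a -> b and return the pair
        releaseBatch(a, b)
        fmt.Println(acquire(&local) == b) // true: the refill pulled from the central list
    }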
459 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
480 sched.maxmcount = 10000
500 sched.lastpoll = uint64(nanotime())
534 // sched lock is held
535 if mcount() > sched.maxmcount {
536 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
549 lock(&sched.lock)
550 if sched.mnext+1 < sched.mnext {
553 mp.id = sched.mnext
554 sched.mnext++
575 unlock(&sched.lock)
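The id-allocation lines at 549-554 hand out the next M id from sched.mnext under sched.lock, and the check at line 550 guards against the counter wrapping: because Go's fixed-size integer arithmetic wraps silently, overflow is detectable as x+1 < x. A trivial standalone illustration (mnext here is just a local variable):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        var mnext int64 = math.MaxInt64
        // x+1 < x holds exactly when incrementing x would wrap, which is
        // what the runtime's "if sched.mnext+1 < sched.mnext" detects.
        fmt.Println(mnext+1 < mnext) // true
        mnext = 0
        fmt.Println(mnext+1 < mnext) // false
    }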
602 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
614 lock(&sched.lock)
622 if n > sched.nmidle+1 { // one M is currently running
623 n = sched.nmidle + 1
625 unlock(&sched.lock)
630 lock(&sched.lock)
638 n -= sched.nmidle + 1 // one M is currently running
639 unlock(&sched.lock)
645 lock(&sched.lock)
661 unlock(&sched.lock)
665 // sched.stopwait to in order to request that all Gs permanently stop.
682 sched.stopwait = freezeStopWait
683 atomic.Store(&sched.gcwaiting, 1)
1015 lock(&sched.lock)
1016 sched.stopwait = gomaxprocs
1017 atomic.Store(&sched.gcwaiting, 1)
1021 sched.stopwait--
1031 sched.stopwait--
1041 sched.stopwait--
1043 wait := sched.stopwait > 0
1044 unlock(&sched.lock)
1050 if notetsleep(&sched.stopnote, 100*1000) {
1051 noteclear(&sched.stopnote)
1060 if sched.stopwait != 0 {
1096 lock(&sched.lock)
1104 sched.gcwaiting = 0
1105 if sched.sysmonwait != 0 {
1106 sched.sysmonwait = 0
1107 notewakeup(&sched.sysmonnote)
1109 unlock(&sched.lock)
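Lines 1015-1109 are the stop-the-world rendezvous: the stopping thread sets sched.stopwait to the number of Ps it must wait for, each P decrements the counter as it stops, the last one to reach zero wakes sched.stopnote, and startTheWorld later clears gcwaiting and kicks sysmon. The same counter-plus-note shape reappears below in forEachP's safePointWait/safePointNote. Here is a standalone model of the rendezvous, with sync.Cond standing in for the runtime's note; the names are illustrative.

    package main

    import (
        "fmt"
        "sync"
    )

    // stw models the stopwait/stopnote rendezvous: the initiator sets the
    // counter to the number of parties, each party decrements it when it
    // stops, and whoever reaches zero wakes the waiter.
    type stw struct {
        mu       sync.Mutex // models sched.lock
        note     *sync.Cond // models sched.stopnote
        stopwait int        // models sched.stopwait
    }

    func newSTW(n int) *stw {
        s := &stw{stopwait: n}
        s.note = sync.NewCond(&s.mu)
        return s
    }

    // ack is what each P does as it reaches a stop: sched.stopwait--,
    // then notewakeup(&sched.stopnote) if it was the last one.
    func (s *stw) ack() {
        s.mu.Lock()
        s.stopwait--
        if s.stopwait == 0 {
            s.note.Signal()
        }
        s.mu.Unlock()
    }

    // wait is the initiator's side of the notetsleep loop at lines 1044-1051.
    func (s *stw) wait() {
        s.mu.Lock()
        for s.stopwait > 0 {
            s.note.Wait()
        }
        s.mu.Unlock()
    }

    func main() {
        s := newSTW(4)
        for i := 0; i < 4; i++ {
            go s.ack()
        }
        s.wait()
        fmt.Println("world stopped")
    }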
1138 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
1253 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
1277 lock(&sched.lock)
1278 sched.nmfreed++
1280 unlock(&sched.lock)
1294 lock(&sched.lock)
1313 m.freelink = sched.freem
1314 sched.freem = m
1316 unlock(&sched.lock)
1325 lock(&sched.lock)
1326 sched.nmfreed++
1328 unlock(&sched.lock)
1358 lock(&sched.lock)
1359 if sched.safePointWait != 0 {
1360 throw("forEachP: sched.safePointWait != 0")
1362 sched.safePointWait = gomaxprocs - 1
1363 sched.safePointFn = fn
1377 // Run safe point function for all idle Ps. sched.pidle will
1378 // not change because we hold sched.lock.
1379 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1382 sched.safePointWait--
1386 wait := sched.safePointWait > 0
1387 unlock(&sched.lock)
1413 if notetsleep(&sched.safePointNote, 100*1000) {
1414 noteclear(&sched.safePointNote)
1420 if sched.safePointWait != 0 {
1429 lock(&sched.lock)
1430 sched.safePointFn = nil
1431 unlock(&sched.lock)
1454 sched.safePointFn(p)
1455 lock(&sched.lock)
1456 sched.safePointWait--
1457 if sched.safePointWait == 0 {
1458 notewakeup(&sched.safePointNote)
1460 unlock(&sched.lock)
1491 if sched.freem != nil {
1492 lock(&sched.lock)
1494 for freem := sched.freem; freem != nil; {
1505 sched.freem = newList
1506 unlock(&sched.lock)
1514 // Windows and Plan 9 will layout sched stack on OS stack.
1618 atomic.Xadd(&sched.ngsys, -1)
1646 // The sched.pc will never be returned to, but setting it to
1651 gp.sched.pc = funcPC(goexit) + sys.PCQuantum
1652 gp.sched.sp = gp.stack.hi
1653 gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
1654 gp.sched.lr = 0
1655 gp.sched.g = guintptr(unsafe.Pointer(gp))
1656 gp.syscallpc = gp.sched.pc
1657 gp.syscallsp = gp.sched.sp
1658 gp.stktopsp = gp.sched.sp
1671 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1680 // sched.ngfree, but that requires locking. Incrementing ngsys
1682 atomic.Xadd(&sched.ngsys, +1)
1722 atomic.Xadd(&sched.ngsys, +1)
1902 lock(&sched.lock)
1903 sched.nmsys++
1905 unlock(&sched.lock)
1944 lock(&sched.lock)
1946 unlock(&sched.lock)
1974 lock(&sched.lock)
1978 unlock(&sched.lock)
1982 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1990 unlock(&sched.lock)
2023 if !runqempty(_p_) || sched.runqsize != 0 {
2034 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
2038 lock(&sched.lock)
2039 if sched.gcwaiting != 0 {
2041 sched.stopwait--
2042 if sched.stopwait == 0 {
2043 notewakeup(&sched.stopnote)
2045 unlock(&sched.lock)
2049 sched.safePointFn(_p_)
2050 sched.safePointWait--
2051 if sched.safePointWait == 0 {
2052 notewakeup(&sched.safePointNote)
2055 if sched.runqsize != 0 {
2056 unlock(&sched.lock)
2062 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2063 unlock(&sched.lock)
2068 unlock(&sched.lock)
2075 if !atomic.Cas(&sched.nmspinning, 0, 1) {
2134 if sched.gcwaiting == 0 {
2141 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2146 lock(&sched.lock)
2148 sched.stopwait--
2149 if sched.stopwait == 0 {
2150 notewakeup(&sched.stopnote)
2152 unlock(&sched.lock)
2179 hz := sched.profilehz
2193 gogo(&gp.sched)
2207 if sched.gcwaiting != 0 {
2229 if sched.runqsize != 0 {
2230 lock(&sched.lock)
2232 unlock(&sched.lock)
2245 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2259 if atomic.Load(&sched.npidle) == procs-1 {
2268 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2273 atomic.Xadd(&sched.nmspinning, 1)
2277 if sched.gcwaiting != 0 {
2309 lock(&sched.lock)
2310 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2311 unlock(&sched.lock)
2314 if sched.runqsize != 0 {
2316 unlock(&sched.lock)
2323 unlock(&sched.lock)
2341 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2349 lock(&sched.lock)
2351 unlock(&sched.lock)
2356 atomic.Xadd(&sched.nmspinning, 1)
2366 lock(&sched.lock)
2372 unlock(&sched.lock)
2377 atomic.Xadd(&sched.nmspinning, 1)
2385 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2393 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2395 lock(&sched.lock)
2397 unlock(&sched.lock)
2419 if sched.runqsize != 0 {
2426 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2441 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2448 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
2464 lock(&sched.lock)
2472 unlock(&sched.lock)
2473 for ; n != 0 && sched.npidle != 0; n-- {
2499 if sched.gcwaiting != 0 {
2523 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2524 lock(&sched.lock)
2526 unlock(&sched.lock)
2610 lock(&sched.lock)
2612 unlock(&sched.lock)
2629 gogo(&gp.sched) // never return
2662 atomic.Xadd(&sched.ngsys, -1)
2705 gogo(&_g_.m.g0.sched)
2711 // save updates getg().sched to refer to pc and sp so that a following
2715 // can clobber getg().sched.
2722 _g_.sched.pc = pc
2723 _g_.sched.sp = sp
2724 _g_.sched.lr = 0
2725 _g_.sched.ret = 0
2726 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
2730 if _g_.sched.ctxt != nil {
2741 // make g->sched refer to the caller's stack segment, because
2776 // but can have inconsistent g->sched, do not let GC observe it.
2800 // systemstack itself clobbers g.sched.{pc,sp} and we might
2806 if atomic.Load(&sched.sysmonwait) != 0 {
2822 if sched.gcwaiting != 0 {
2827 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
2841 lock(&sched.lock)
2842 if atomic.Load(&sched.sysmonwait) != 0 {
2843 atomic.Store(&sched.sysmonwait, 0)
2844 notewakeup(&sched.sysmonnote)
2846 unlock(&sched.lock)
2853 lock(&sched.lock)
2854 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2860 if sched.stopwait--; sched.stopwait == 0 {
2861 notewakeup(&sched.stopnote)
2864 unlock(&sched.lock)
2883 _g_.syscallsp = _g_.sched.sp
2884 _g_.syscallpc = _g_.sched.pc
2887 sp2 := _g_.sched.sp
2897 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3014 if sched.stopwait == freezeStopWait {
3031 if sched.pidle != 0 {
3083 lock(&sched.lock)
3085 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3086 atomic.Store(&sched.sysmonwait, 0)
3087 notewakeup(&sched.sysmonnote)
3089 unlock(&sched.lock)
3106 lock(&sched.lock)
3110 } else if atomic.Load(&sched.sysmonwait) != 0 {
3111 atomic.Store(&sched.sysmonwait, 0)
3112 notewakeup(&sched.sysmonnote)
3114 unlock(&sched.lock)
3307 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
3308 newg.sched.sp = sp
3310 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
3311 newg.sched.g = guintptr(unsafe.Pointer(newg))
3312 gostartcallfn(&newg.sched, fn)
3319 atomic.Xadd(&sched.ngsys, +1)
3325 // Sched.goidgen is the last allocated id,
3326 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
3327 // At startup sched.goidgen=0, so main goroutine receives goid=1.
3328 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
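The goid lines (459, 1671, 3325-3328) describe batched id allocation: sched.goidgen is the last id allocated globally, each P reserves a whole batch with one atomic add and then serves ids from its local cache, and a reserved batch is [goidgen+1, goidgen+GoidCacheBatch], so with goidgen starting at 0 the main goroutine gets goid 1. A small standalone model of that scheme follows; goidCacheBatch and pCache are illustrative names.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const goidCacheBatch = 16 // plays the role of _GoidCacheBatch

    var goidgen uint64 // plays the role of sched.goidgen: the last id allocated

    // pCache is a per-P window of reserved ids.
    type pCache struct {
        goidcache    uint64 // next id to hand out
        goidcacheend uint64 // one past the last reserved id
    }

    // nextID refills the window with one atomic add per batch, then hands
    // out ids locally with no further synchronization.
    func (c *pCache) nextID() uint64 {
        if c.goidcache == c.goidcacheend {
            // The add returns the new value of goidgen, i.e. the last id
            // in this batch, so the batch is [end-goidCacheBatch+1, end].
            end := atomic.AddUint64(&goidgen, goidCacheBatch)
            c.goidcache = end - goidCacheBatch + 1
            c.goidcacheend = end + 1
        }
        id := c.goidcache
        c.goidcache++
        return id
    }

    func main() {
        var c pCache
        fmt.Println(c.nextID(), c.nextID()) // 1 2
    }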
3342 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
3372 lock(&sched.gflock)
3378 gp.schedlink.set(sched.gfreeNoStack)
3379 sched.gfreeNoStack = gp
3381 gp.schedlink.set(sched.gfreeStack)
3382 sched.gfreeStack = gp
3384 sched.ngfree++
3386 unlock(&sched.gflock)
3395 if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) {
3396 lock(&sched.gflock)
3398 if sched.gfreeStack != nil {
3400 gp = sched.gfreeStack
3401 sched.gfreeStack = gp.schedlink.ptr()
3402 } else if sched.gfreeNoStack != nil {
3403 gp = sched.gfreeNoStack
3404 sched.gfreeNoStack = gp.schedlink.ptr()
3409 sched.ngfree--
3413 unlock(&sched.gflock)
3439 lock(&sched.gflock)
3445 gp.schedlink.set(sched.gfreeNoStack)
3446 sched.gfreeNoStack = gp
3448 gp.schedlink.set(sched.gfreeStack)
3449 sched.gfreeStack = gp
3451 sched.ngfree++
3453 unlock(&sched.gflock)
3556 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
3570 return int32(sched.mnext - sched.nmfreed)
3835 lock(&sched.lock)
3836 sched.profilehz = hz
3837 unlock(&sched.lock)
3846 // Change number of processors. The world is stopped, sched is locked.
3861 if sched.procresizetime != 0 {
3862 sched.totaltime += int64(old) * (now - sched.procresizetime)
3864 sched.procresizetime = now
4091 lock(&sched.lock)
4092 sched.nmidlelocked += v
4096 unlock(&sched.lock)
4101 // sched.lock must be held.
4118 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
4123 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
4186 lock(&sched.lock)
4187 sched.nmsys++
4189 unlock(&sched.lock)
4217 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
4218 lock(&sched.lock)
4219 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
4220 atomic.Store(&sched.sysmonwait, 1)
4221 unlock(&sched.lock)
4239 notetsleep(&sched.sysmonnote, maxsleep)
4243 lock(&sched.lock)
4244 atomic.Store(&sched.sysmonwait, 0)
4245 noteclear(&sched.sysmonnote)
4249 unlock(&sched.lock)
4256 lastpoll := int64(atomic.Load64(&sched.lastpoll))
4259 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
4341 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
4344 // Drop allpLock so we can take sched.lock.
4436 lock(&sched.lock)
4437 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4439 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4469 unlock(&sched.lock)
4508 unlock(&sched.lock)
4512 // Sched must be locked.
4516 mp.schedlink = sched.midle
4517 sched.midle.set(mp)
4518 sched.nmidle++
4523 // Sched must be locked.
4527 mp := sched.midle.ptr()
4529 sched.midle = mp.schedlink
4530 sched.nmidle--
4536 // Sched must be locked.
4541 if sched.runqtail != 0 {
4542 sched.runqtail.ptr().schedlink.set(gp)
4544 sched.runqhead.set(gp)
4546 sched.runqtail.set(gp)
4547 sched.runqsize++
4551 // Sched must be locked.
4555 gp.schedlink = sched.runqhead
4556 sched.runqhead.set(gp)
4557 if sched.runqtail == 0 {
4558 sched.runqtail.set(gp)
4560 sched.runqsize++
4564 // Sched must be locked.
4567 if sched.runqtail != 0 {
4568 sched.runqtail.ptr().schedlink.set(ghead)
4570 sched.runqhead.set(ghead)
4572 sched.runqtail.set(gtail)
4573 sched.runqsize += n
4577 // Sched must be locked.
4579 if sched.runqsize == 0 {
4583 n := sched.runqsize/gomaxprocs + 1
4584 if n > sched.runqsize {
4585 n = sched.runqsize
4594 sched.runqsize -= n
4595 if sched.runqsize == 0 {
4596 sched.runqtail = 0
4599 gp := sched.runqhead.ptr()
4600 sched.runqhead = gp.schedlink
4603 gp1 := sched.runqhead.ptr()
4604 sched.runqhead = gp1.schedlink
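The global run queue lines above (4541-4604) implement an intrusive singly-linked FIFO: goroutines are chained through their schedlink field between runqhead and runqtail, and globrunqget takes at most runqsize/gomaxprocs+1 of them so one P cannot drain the whole queue. A simplified standalone sketch of the same structure, without sched.lock and with a slice returned where the runtime moves the extras onto the local run queue:

    package main

    import "fmt"

    // g carries the intrusive schedlink pointer, as in the runtime.
    type g struct {
        id        int
        schedlink *g
    }

    type globalRunq struct {
        head, tail *g
        size       int
    }

    // put appends at the tail, as globrunqput does.
    func (q *globalRunq) put(gp *g) {
        gp.schedlink = nil
        if q.tail != nil {
            q.tail.schedlink = gp
        } else {
            q.head = gp
        }
        q.tail = gp
        q.size++
    }

    // get removes up to size/gomaxprocs+1 goroutines from the head,
    // mirroring "n := sched.runqsize/gomaxprocs + 1" above.
    func (q *globalRunq) get(gomaxprocs int) []*g {
        if q.size == 0 {
            return nil
        }
        n := q.size/gomaxprocs + 1
        if n > q.size {
            n = q.size
        }
        batch := make([]*g, 0, n)
        for ; n > 0; n-- {
            gp := q.head
            q.head = gp.schedlink
            q.size--
            batch = append(batch, gp)
        }
        if q.size == 0 {
            q.tail = nil
        }
        return batch
    }

    func main() {
        var q globalRunq
        for i := 1; i <= 3; i++ {
            q.put(&g{id: i})
        }
        fmt.Println(q.get(4)[0].id) // 1: with 3 queued and gomaxprocs=4, only the head is taken
    }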
4611 // Sched must be locked.
4618 _p_.link = sched.pidle
4619 sched.pidle.set(_p_)
4620 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
4624 // Sched must be locked.
4628 _p_ := sched.pidle.ptr()
4630 sched.pidle = _p_.link
4631 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
4734 lock(&sched.lock)
4736 unlock(&sched.lock)
4849 lock(&sched.lock)
4850 out = int(sched.maxmcount)
4852 sched.maxmcount = 0x7fffffff
4854 sched.maxmcount = int32(in)
4857 unlock(&sched.lock)
4932 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
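The final match (line 4932) is the bail-out condition for active spinning in sync primitives: give up after a few iterations, never spin on a single-CPU machine, and don't spin when every other P is already idle or spinning, since then nobody is running who could release the lock. Restated as a standalone predicate in its positive form (the names and the driver below are illustrative, not the runtime's API):

    package main

    import "fmt"

    // canSpin is the negation of the matched condition:
    // i >= active_spin || ncpu <= 1 || gomaxprocs <= npidle+nmspinning+1.
    func canSpin(i, activeSpin, ncpu, gomaxprocs, npidle, nmspinning int) bool {
        return i < activeSpin && ncpu > 1 && gomaxprocs > npidle+nmspinning+1
    }

    func main() {
        // 8 Ps, 2 idle, 1 spinning: some P is running real work, so spinning may pay off.
        fmt.Println(canSpin(0, 4, 8, 8, 2, 1)) // true
        // Everyone else is idle or spinning: spinning would only burn CPU.
        fmt.Println(canSpin(0, 4, 8, 8, 6, 1)) // false
    }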