
Lines Matching full:allp (the number at the start of each hit is its line number in the matched runtime source file)

515 		for _, p := range allp {
648 if allp[pos].mcache == _g_.m.mcache {
656 mp.p.set(allp[pos])
657 mp.mcache = allp[pos].mcache
1023 for _, p := range allp {
1063 for _, p := range allp {
1366 for _, p := range allp {
1394 for _, p := range allp {
1423 for _, p := range allp {
2281 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2302 // Before we drop our P, make a snapshot of the allp slice,
2305 // everything up to cap(allp) is immutable.
2306 allpSnapshot := allp
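
The comment at 2302-2306 captures an invariant worth spelling out: allp may be replaced while a goroutine is not holding a P, but entries already stored below cap(allp) are never overwritten, so copying the slice header is enough to iterate safely afterwards. Below is a minimal sketch of that snapshot pattern in ordinary Go, under the assumption of an append-only slice guarded by a lock; procs, procsLock, and addProc are hypothetical names, not runtime identifiers.

package main

import (
	"fmt"
	"sync"
)

type proc struct{ id int }

var (
	procsLock sync.Mutex
	procs     []*proc // grown under procsLock; stored entries are never rewritten
)

// addProc is a hypothetical grower: append may reallocate the backing
// array, but it never mutates an element an existing snapshot can see.
func addProc(id int) {
	procsLock.Lock()
	procs = append(procs, &proc{id: id})
	procsLock.Unlock()
}

func main() {
	for i := 0; i < 4; i++ {
		addProc(i)
	}

	// Snapshot the slice header under the lock, then release it:
	// concurrent appends cannot invalidate what the snapshot sees.
	procsLock.Lock()
	snapshot := procs
	procsLock.Unlock()

	for _, p := range snapshot {
		fmt.Println("proc", p.id)
	}
}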
3557 for _, _p_ := range allp {
3866 // Grow allp if necessary.
3867 if nprocs > int32(len(allp)) {
3871 if nprocs <= int32(cap(allp)) {
3872 allp = allp[:nprocs]
3875 // Copy everything up to allp's cap so we
3877 copy(nallp, allp[:cap(allp)])
3878 allp = nallp
3885 pp := allp[i]
3895 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
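
Lines 3866-3895 are the grow step of procresize: reslice in place when the existing capacity suffices; otherwise allocate a new backing array and copy everything up to cap, not just len, so Ps allocated under an earlier, larger configuration are not lost (the real code then publishes each new P with atomicstorep so lock-free readers never see an uninitialized entry). A hedged, self-contained sketch of the same shape, with growProcs and proc as illustrative names:

package main

import "fmt"

type proc struct{ id int }

// growProcs mirrors the excerpt's shape: extend in place when capacity
// allows; otherwise reallocate and copy up to cap so no previously
// allocated element is dropped.
func growProcs(procs []*proc, n int) []*proc {
	if n <= cap(procs) {
		return procs[:n]
	}
	nprocs := make([]*proc, n)
	copy(nprocs, procs[:cap(procs)])
	return nprocs
}

func main() {
	procs := make([]*proc, 2, 4)
	procs[0], procs[1] = &proc{0}, &proc{1}

	procs = growProcs(procs, 8)
	for i, p := range procs {
		if p == nil {
			procs[i] = &proc{i} // initialize newly exposed slots
		}
	}
	fmt.Println("len", len(procs), "cap", cap(procs)) // len 8 cap 8
}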
3919 p := allp[i]
3978 // Trim allp.
3979 if int32(len(allp)) != nprocs {
3981 allp = allp[:nprocs]
3990 // release the current P and acquire allp[0]
3996 p := allp[0]
4006 p := allp[i]
4315 // Prevent allp slice changes. This lock will be completely
4318 // We can't use a range loop over allp because we may
4320 // allp each time around the loop.
4321 for i := 0; i < len(allp); i++ {
4322 _p_ := allp[i]
4325 // allp but not yet created new Ps.
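
The comment at 4318-4320 explains the loop shape at 4321-4322: a range loop would evaluate allp once, so if allpLock is temporarily dropped and procresize swaps in a new slice, the loop would keep walking the stale header; indexing with i and re-reading len(allp) on each pass observes the replacement. The nil check exists because procresize grows allp before creating the new Ps. A minimal sketch of the same pattern, under the assumption of a lock released inside the loop body; every identifier here is illustrative:

package main

import (
	"fmt"
	"sync"
)

var (
	procsLock sync.Mutex
	procs     = []*int{new(int), nil, new(int)} // a nil slot: grown but not yet created
)

// mayGrow stands in for whatever can replace the slice while the lock
// is dropped (procresize plays that role in the excerpt). It stops at
// five elements so the walk below terminates.
func mayGrow() {
	procsLock.Lock()
	if len(procs) < 5 {
		procs = append(procs, new(int))
	}
	procsLock.Unlock()
}

func main() {
	procsLock.Lock()
	// len(procs) and procs[i] are re-read on every pass, so growth
	// that happens while the lock is dropped is still visited.
	for i := 0; i < len(procs); i++ {
		p := procs[i]
		if p == nil {
			continue // slot grown but not yet populated, as in the excerpt
		}
		*p = i

		procsLock.Unlock() // the lock is temporarily dropped here
		mayGrow()
		procsLock.Lock()
	}
	n := len(procs)
	procsLock.Unlock()
	fmt.Println("walked a slice that grew to length", n)
}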
4387 for _, _p_ := range allp {
4444 for i, _p_ := range allp {
4462 if i == len(allp)-1 {