// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// rootBlockSpans is the number of spans to scan per span
	// root.
	rootBlockSpans = 8 * 1024 // 64MB worth of spans

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000
)
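
// An illustrative back-of-the-envelope check of the two latency bounds
// above (an assumption for documentation only; the runtime does not run
// this). At an assumed scan rate of ~1.25 MB/ms:
//
//	const bytesPerMicro = 1310 // ~1.25 MB/ms expressed per µs (assumption)
//	obletMicros := maxObletBytes / bytesPerMicro       // ≈ 100 µs per oblet
//	checkMicros := drainCheckThreshold / bytesPerMicro // ≈ 76 µs between checks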

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The caller must have called gcCopySpans().
//
// The world must be stopped.
//
//go:nowritebarrier
func gcMarkRootPrepare() {
	if gcphase == _GCmarktermination {
		work.nFlushCacheRoots = int(gomaxprocs)
	} else {
		work.nFlushCacheRoots = 0
	}

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Only scan globals once per cycle; preferably concurrently.
	if !work.markrootDone {
		for _, datap := range activeModules() {
			nDataRoots := nBlocks(datap.edata - datap.data)
			if nDataRoots > work.nDataRoots {
				work.nDataRoots = nDataRoots
			}
		}

		for _, datap := range activeModules() {
			nBSSRoots := nBlocks(datap.ebss - datap.bss)
			if nBSSRoots > work.nBSSRoots {
				work.nBSSRoots = nBSSRoots
			}
		}
	}

	if !work.markrootDone {
		// On the first markroot, we need to scan span roots.
		// In concurrent GC, this happens during concurrent
		// mark and we depend on addfinalizer to ensure the
		// above invariants for objects that get finalizers
		// after concurrent mark. In STW GC, this will happen
		// during mark termination.
		//
		// We're only interested in scanning the in-use spans,
		// which will all be swept at this point. More spans
		// may be added to this list during concurrent GC, but
		// we only care about spans that were allocated before
		// this mark phase.
		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()

		// On the first markroot, we need to scan all Gs. Gs
		// may be created after this point, but it's okay that
		// we ignore them because they begin life without any
		// roots, so there's nothing to scan, and any roots
		// they create during the concurrent phase will be
		// scanned during mark termination. During mark
		// termination, allglen isn't changing, so we'll scan
		// all Gs.
		work.nStackRoots = int(atomic.Loaduintptr(&allglen))
	} else {
		// We've already scanned span roots and kept the scan
		// up-to-date during concurrent mark.
		work.nSpanRoots = 0

		// The hybrid barrier ensures that stacks can't
		// contain pointers to unmarked objects, so on the
		// second markroot, there's no need to scan stacks.
		work.nStackRoots = 0

		if debug.gcrescanstacks > 0 {
			// Scan stacks anyway for debugging.
			work.nStackRoots = int(atomic.Loaduintptr(&allglen))
		}
	}

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
}
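
// A worked example of the nBlocks rounding above (illustrative; the
// segment size is made up): a module with a 1 MiB data segment needs
// ceil(1 MiB / rootBlockBytes) root jobs, which the +rootBlockBytes-1
// term computes with integer division:
//
//	bytes := uintptr(1 << 20)                               // hypothetical segment
//	n := int((bytes + rootBlockBytes - 1) / rootBlockBytes) // == 4 jobs of 256 KiB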

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	var gp *g
	if gcphase == _GCmarktermination && debug.gcrescanstacks > 0 {
		for i := 0; i < len(allgs); i++ {
			gp = allgs[i]
			if !(gp.gcscandone && gp.gcscanvalid) && readgstatus(gp) != _Gdead {
				goto fail
			}
		}
	} else {
		for i := 0; i < work.nStackRoots; i++ {
			gp = allgs[i]
			if !gp.gcscandone {
				goto fail
			}
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone,
		"gcscanvalid", gp.gcscanvalid)
	unlock(&allglock) // Avoid self-deadlock with traceback.
	throw("scan missed a g")
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// TODO(austin): This is a bit ridiculous. Compute and store
	// the bases in gcMarkRootPrepare instead of the counts.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)

	// Note: if you add a case here, please also update heapdump.go:dumproots.
	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		// Only do this once per GC cycle since we don't call
		// queuefinalizer during marking.
		if work.markrootDone {
			break
		}
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
		}

	case i == fixedRootFreeGStacks:
		// Only do this once per GC cycle; preferably
		// concurrently.
		if !work.markrootDone {
			// Switch to the system stack so we can call
			// stackfree.
			systemstack(markrootFreeGStacks)
		}

	case baseSpans <= i && i < baseStacks:
		// mark MSpan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}

		// Remember when we first observed the G blocked. This
		// is needed only for traceback output.
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scang must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = "garbage collection scan"
			}

			// TODO: scang blocks until gp's stack has
			// been scanned, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			scang(gp, gcw)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}
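
// For reference (illustrative only): the switch above carves the job
// index space set up by gcMarkRootPrepare into contiguous ranges,
// consumed in this order:
//
//	[0, fixedRootCount)        fixed roots (finalizers, free G stacks)
//	[baseFlushCache, baseData) per-P mcache flushes
//	[baseData, baseBSS)        data segment shards
//	[baseBSS, baseSpans)       BSS segment shards
//	[baseSpans, baseStacks)    span shards (finalizer specials)
//	[baseStacks, end)          goroutine stacks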

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	b := b0 + uintptr(shard)*rootBlockBytes
	if b >= b0+n0 {
		return
	}
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if b+n > b0+n0 {
		n = b0 + n0 - b
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw)
}
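
// The shard arithmetic above, by example (illustrative; the inputs are
// made up): for shard 3 with rootBlockBytes = 256 KiB, scanning starts
// 768 KiB into the block, and the pointer mask advances one byte for
// every 8 words scanned (one mask bit per word):
//
//	shard := uintptr(3)
//	b := b0 + shard*rootBlockBytes                          // b0 + 768 KiB
//	maskOff := shard * (rootBlockBytes / (8 * sys.PtrSize)) // mask bytes skipped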

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
//
//TODO go:nowritebarrier
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gflock)
	list := sched.gfreeStack
	sched.gfreeStack = nil
	unlock(&sched.gflock)
	if list == nil {
		return
	}

	// Free stacks.
	tail := list
	for gp := list; gp != nil; gp = gp.schedlink.ptr() {
		shrinkstack(gp)
		tail = gp
	}

	// Put Gs back on the free list.
	lock(&sched.gflock)
	tail.schedlink.set(sched.gfreeNoStack)
	sched.gfreeNoStack = list
	unlock(&sched.gflock)
}

// markrootSpans marks roots for one shard of work.spans.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// TODO(austin): There are several ideas for making this more
	// efficient in issue #11485.

	if work.markrootDone {
		throw("markrootSpans during second markroot")
	}

	sg := mheap_.sweepgen
	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
	// Note that work.spans may not include spans that were
	// allocated between entering the scan phase and now. This is
	// okay because any objects with finalizers in those spans
	// must have been allocated and given finalizers after we
	// entered the scan phase, so addfinalizer will have ensured
	// the above invariants for them.
	for _, s := range spans {
		if s.state != mSpanInUse {
			continue
		}
		if !useCheckmark && s.sweepgen != sg {
			// sweepgen was updated (+2) during non-checkmark GC pass
			print("sweep ", s.sweepgen, " ", sg, "\n")
			throw("gc: unswept span")
		}

		// Speculatively check if there are any specials
		// without acquiring the span lock. This may race with
		// adding the first special to a span, but in that
		// case addfinalizer will observe that the GC is
		// active (which is globally synchronized) and ensure
		// the above invariants. We may also ensure the
		// invariants, but it's okay to scan an object twice.
		if s.specials == nil {
			continue
		}

		// Lock the specials to prevent a special from being
		// removed from the list while we're traversing it.
		lock(&s.speciallock)

		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialFinalizer {
				continue
			}
			// Don't mark the finalized object, but scan it so we
			// retain everything it points to.
			spf := (*specialfinalizer)(unsafe.Pointer(sp))
			// A finalizer can be set for an inner byte of an
			// object; find the object's beginning.
			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

			// Mark everything that can be reached from
			// the object (but *not* the object itself or
			// we'll never collect it).
			scanobject(p, gcw)

			// The special itself is a root.
			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw)
		}

		unlock(&s.speciallock)
	}
}
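
// The base computation above rounds an interior offset down to an
// object boundary. A worked example (illustrative; the numbers are made
// up): in a span of 48-byte objects, a finalizer attached at offset 112
// belongs to the object starting at offset 96:
//
//	elemsize := uintptr(48)
//	offset := uintptr(112)
//	objOff := offset / elemsize * elemsize // == 96, object #2 in the span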

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	traced := false
retry:
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if traced {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && !traced {
		traced = true
		traceGCMarkAssistStart()
	}

	// Perform assist work
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if traced {
		traceGCMarkAssistDone()
	}
}
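
// The debt/work conversions above are linear in the controller's
// assistWorkPerByte and assistBytesPerWork ratios. A sketch with assumed
// values (the real ratios are recomputed each cycle; these are made up):
//
//	assistWorkPerByte := 0.5                                  // assumption
//	debtBytes := int64(4096)                                  // gp owes 4 KiB
//	scanWork := int64(assistWorkPerByte * float64(debtBytes)) // 2048 units owed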

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it, but an atomic check in every
		// malloc would be a performance hit. Instead we recheck
		// it here on the non-preemptible system stack to
		// determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	startTime := nanotime()

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "GC assist marking"

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)
	// If we are near the end of the mark phase
	// dispose of the gcw.
	if gcBlackenPromptly {
		gcw.dispose()
	}

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc,
			"gcBlackenPromptly=", gcBlackenPromptly)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	duration := nanotime() - startTime
	_p_ := gp.m.p.ptr()
	_p_.gcAssistTime += duration
	if _p_.gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
		_p_.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	injectglist(work.assistQueue.head.ptr())
	work.assistQueue.head.set(nil)
	work.assistQueue.tail.set(nil)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist returns whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
//
//go:nowritebarrier
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldHead, oldTail := work.assistQueue.head, work.assistQueue.tail
	if oldHead == 0 {
		work.assistQueue.head.set(gp)
	} else {
		oldTail.ptr().schedlink.set(gp)
	}
	work.assistQueue.tail.set(gp)
	gp.schedlink.set(nil)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.head = oldHead
		work.assistQueue.tail = oldTail
		if oldTail != 0 {
			oldTail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, "GC assist wait", traceEvGoBlockGC, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.head == 0 {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)

	lock(&work.assistQueue.lock)
	gp := work.assistQueue.head.ptr()
	for gp != nil && scanBytes > 0 {
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			xgp := gp
			gp = gp.schedlink.ptr()
			// It's important that we *not* put xgp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(xgp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			xgp := gp
			gp = gp.schedlink.ptr()
			if gp == nil {
				// gp is the only assist in the queue.
				gp = xgp
			} else {
				xgp.schedlink = 0
				work.assistQueue.tail.ptr().schedlink.set(xgp)
				work.assistQueue.tail.set(xgp)
			}
			break
		}
	}
	work.assistQueue.head.set(gp)
	if gp == nil {
		work.assistQueue.tail.set(nil)
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}
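
// The sign convention in the loop above, by example (illustrative): a
// parked assist with gcAssistBytes = -1000 is fully satisfied by
// scanBytes = 1500, leaving 500 bytes of credit for the next assist:
//
//	scanBytes := int64(1500)
//	debt := int64(-1000)     // gp.gcAssistBytes, negative while in debt
//	if scanBytes+debt >= 0 { // 500 >= 0: fully satisfied
//		scanBytes += debt // 500 bytes left to distribute
//		debt = 0
//	}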

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if gp.gcscanvalid {
		return
	}

	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}
	mp := gp.m
	if mp != nil && mp.helpgc != 0 {
		throw("can't scan gchelper stack")
	}

	// Shrink the stack if not much of it is being used. During
	// concurrent GC, we can do this during concurrent mark.
	if !work.markrootDone {
		shrinkstack(gp)
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw)
	}

	// Scan the stack.
	var cache pcvalueCache
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &cache, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
	tracebackdefers(gp, scanframe, nil)
	gp.gcscanvalid = true
}

// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {

	f := frame.fn
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return
	}
	if _DebugGC > 1 {
		print("scanframe ", funcname(f), "\n")
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Scan local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}

		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stkmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("scanframe: bad symbol table")
		}
		bv := stackmapdata(stkmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		scanblock(frame.varp-size, size, bv.bytedata, gcw)
	}

	// Scan arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stkmap == nil || stkmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("scanframe: bad symbol table")
			}
			bv = stackmapdata(stkmap, pcdata)
		}
		scanblock(frame.argp, uintptr(bv.n)*sys.PtrSize, bv.bytedata, gcw)
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainNoBlock
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional

	// gcDrainBlock means neither gcDrainUntilPreempt nor
	// gcDrainNoBlock. It is the default, but callers should use
	// the constant for documentation purposes.
	gcDrainBlock gcDrainFlags = 0
)
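
// Callers combine these flags bitwise. A sketch of plausible
// combinations (illustrative; the real call sites live in the mark
// worker and assist code elsewhere in the runtime):
//
//	gcDrain(gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)             // dedicated worker
//	gcDrain(gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit) // idle worker
//	gcDrain(gcw, gcDrainBlock)                                         // block until done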

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until all roots and work buffers have been drained.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set. This implies gcDrainNoBlock.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do. This implies gcDrainNoBlock.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
// unable to get more work. Otherwise, it will block until all
// blocking calls are blocked in gcDrain.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	blocking := flags&(gcDrainUntilPreempt|gcDrainIdle|gcDrainFractional|gcDrainNoBlock) == 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.scanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		for !(preemptible && gp.preempt) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	for !(preemptible && gp.preempt) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		var b uintptr
		if blocking {
			b = gcw.get()
		} else {
			b = gcw.tryGetFast()
			if b == 0 {
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// work barrier reached or tryGet failed.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.scanWork
			gcw.scanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

	// In blocking mode, write barriers are not allowed after this
	// point because we must preserve the condition that the work
	// buffers are empty.

done:
	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// This might be a good place to add prefetch code...
		// if(wbuf.nobj > 4) {
		//         PREFETCH(wbuf->obj[wbuf.nobj - 3]);
		// }
		//
		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
		}

		if b == 0 {
			// Try to do a root job.
			//
			// TODO: Assists should get credit for this
			// work.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					markroot(gcw, job)
					continue
				}
			}
			// No heap or root jobs.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.scanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				obj := *(*uintptr)(unsafe.Pointer(b + i))
				if obj != 0 && arena_start <= obj && obj < arena_used {
					if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
						greyobject(obj, b, i, hbits, span, gcw, objIndex)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}
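
// The ptrmask encoding, by example (illustrative): one bit per word,
// least significant bit first within each byte, so a mask byte of 0x05
// over a 3-word block means words 0 and 2 are pointers and word 1 is a
// scalar. oneptrmask above is simply the mask for a single pointer word:
//
//	mask := uint8(0x05)        // hypothetical 3-word block
//	isPtr0 := mask&1 != 0      // true: word 0 is a pointer
//	isPtr1 := (mask>>1)&1 != 0 // false: word 1 is a scalar
//	isPtr2 := (mask>>2)&1 != 0 // true: word 2 is a pointer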

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Note that arena_used may change concurrently during
	// scanobject and hence scanobject may encounter a pointer to
	// a newly allocated heap object that is *not* in
	// [start,used). It will not mark this object; however, we
	// know that it was just installed by a mutator, which means
	// that mutator will execute a write barrier and take care of
	// marking it. This is even more pronounced on relaxed memory
	// architectures since we access arena_used without barriers
	// or synchronization, but the same logic applies.
	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// It's possible this is a noscan object (not
			// from greyobject, but from other code
			// paths), in which case we must *not* enqueue
			// oblets since their bitmaps will be
			// uninitialized.
			if s.spanclass.noscan() {
				// Bypass the whole scan.
				gcw.bytesMarked += uint64(n)
				return
			}

			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// Load bits once. See CL 22712 and issue 16973 for discussion.
		bits := hbits.bits()
		// During checkmarking, 1-word objects store the checkmark
		// in the type bit for the one word. The only one-word objects
		// are pointers, or else they'd be merged with other non-pointer
		// data into larger allocations.
		if i != 1*sys.PtrSize && bits&bitScan == 0 {
			break // no more pointers in this object
		}
		if bits&bitPointer == 0 {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Check if it points into heap and not back at the current object.
		if obj != 0 && arena_start <= obj && obj < arena_used && obj-b >= n {
			// Mark the object.
			if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, hbits, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}
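
// Oblet splitting, by example (illustrative; the object size is made
// up): a 300 KiB object at base p is scanned as three oblets. The call
// with b == p enqueues the later oblets and scans only the first one;
// subsequent calls pick the rest off the work queue:
//
//	scanobject(p, gcw)         // scans [p, p+128K), enqueues p+128K and p+256K
//	scanobject(p+128<<10, gcw) // scans the second 128 KiB oblet
//	scanobject(p+256<<10, gcw) // scans the 44 KiB tail; n is capped by elemsize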

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
	if obj, hbits, span, objIndex := heapBitsForObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, hbits, span, gcw, objIndex)
		if gcphase == _GCmarktermination || gcBlackenPromptly {
			// Ps aren't allowed to cache work during mark
			// termination.
			gcw.dispose()
		}
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if !mbits.isMarked() {
			printlock()
			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")

			// Dump the source (base) object
			gcDumpObject("base", base, off)

			// Dump the object
			gcDumpObject("obj", obj, ^uintptr(0))

			getg().m.traceback = 2
			throw("checkmark found unmarked object")
		}
		if hbits.isCheckmarked(span.elemsize) {
			return
		}
		hbits.setCheckmarked(span.elemsize)
		if !hbits.isCheckmarked(span.elemsize) {
			throw("setCheckmarked and isCheckmarked disagree")
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		// mbits.setMarked() // Avoid extra call overhead with manual inlining.
		atomic.Or8(mbits.bytep, mbits.mask)
		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	if obj < mheap_.arena_start || obj >= mheap_.arena_used {
		print(label, "=", hex(obj), " is not in the Go heap\n")
		return
	}
	k := obj >> _PageShift
	x := k
	x -= mheap_.arena_start >> _PageShift
	s := mheap_.spans[x]
	print(label, "=", hex(obj), " k=", hex(k))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
		print(mSpanStateNames[s.state], "\n")
	} else {
		print("unknown(", s.state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state == _MSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + sys.PtrSize
	}
	for i := uintptr(0); i < size; i += sys.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(obj, size, scanSize uintptr) {
	if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	markBitsForAddr(obj).setMarked()
	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
	if gcBlackenPromptly {
		// There shouldn't be anything in the work queue, but
		// we still need to flush stats.
		gcw.dispose()
	}
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, hbits, span, objIndex := heapBitsForObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, hbits, span, gcw, objIndex)
		if gcBlackenPromptly {
			gcw.dispose()
		}
	}
}

// Checkmarking

// To help debug the concurrent GC we remark with the world
// stopped ensuring that any object encountered has its normal
// mark bit set. To do this we use an orthogonal bit
// pattern to indicate the object is marked. The following pattern
// uses the upper two bits in the object's boundary nibble.
// 01: scalar  not marked
// 10: pointer not marked
// 11: pointer     marked
// 00: scalar      marked
// Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
// The higher bit is 1 for pointers and 0 for scalars, whether the object
// is marked or not.
// The first nibble no longer holds the typeDead pattern indicating that
// there are no more pointers in the object. This information is held
// in the second nibble.
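
// For example (illustrative), XORing with 01 toggles only the mark while
// the high bit continues to distinguish pointer from scalar:
//
//	scalarUnmarked := uint8(0x1) // 01
//	ptrUnmarked := uint8(0x2)    // 10
//	_ = scalarUnmarked ^ 0x1     // 00: scalar, marked
//	_ = ptrUnmarked ^ 0x1        // 11: pointer, marked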

// If useCheckmark is true, marking of an object uses the
// checkmark bits (encoding above) instead of the standard
// mark bits.
var useCheckmark = false

//go:nowritebarrier
func initCheckmarks() {
	useCheckmark = true
	for _, s := range mheap_.allspans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
		}
	}
}

func clearCheckmarks() {
	useCheckmark = false
	for _, s := range mheap_.allspans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
		}
	}
}
   1439