// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
// non-generational and non-compacting. Allocation is done using size-segregated per-P allocation
// areas to minimize fragmentation while eliminating locks in the common case.
//
// The algorithm decomposes into several steps.
// This is a high-level description of the algorithm being used. For an overview of GC a good
// place to start is Richard Jones' gchandbook.org.
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
// 966-975.
// For journal-quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
// 1. GC performs sweep termination.
//
//    a. Stop the world. This causes all Ps to reach a GC safe-point.
//
//    b. Sweep any unswept spans. There will only be unswept spans if
//    this GC cycle was forced before the expected time.
//
// 2. GC performs the "mark 1" sub-phase. In this sub-phase, Ps are
// allowed to locally cache parts of the work queue.
//
//    a. Prepare for the mark phase by setting gcphase to _GCmark
//    (from _GCoff), enabling the write barrier, enabling mutator
//    assists, and enqueueing root mark jobs. No objects may be
//    scanned until all Ps have enabled the write barrier, which is
//    accomplished using STW.
//
//    b. Start the world. From this point, GC work is done by mark
//    workers started by the scheduler and by assists performed as
//    part of allocation. The write barrier shades both the
//    overwritten pointer and the new pointer value for any pointer
//    writes (see mbarrier.go for details; a pseudocode sketch of the
//    barrier follows these steps). Newly allocated objects are
//    immediately marked black.
//
//    c. GC performs root marking jobs. This includes scanning all
//    stacks, shading all globals, and shading any heap pointers in
//    off-heap runtime data structures. Scanning a stack stops a
//    goroutine, shades any pointers found on its stack, and then
//    resumes the goroutine.
//
//    d. GC drains the work queue of grey objects, scanning each grey
//    object to black and shading all pointers found in the object
//    (which in turn may add those pointers to the work queue).
//
// 3. Once the global work queue is empty (but local work queue caches
// may still contain work), GC performs the "mark 2" sub-phase.
//
//    a. GC stops all workers, disables local work queue caches,
//    flushes each P's local work queue cache to the global work queue
//    cache, and reenables workers.
//
//    b. GC again drains the work queue, as in 2d above.
//
// 4. Once the work queue is empty, GC performs mark termination.
//
//    a. Stop the world.
//
//    b. Set gcphase to _GCmarktermination, and disable workers and
//    assists.
//
//    c. Drain any remaining work from the work queue (typically there
//    will be none).
//
//    d. Perform other housekeeping like flushing mcaches.
//
// 5. GC performs the sweep phase.
//
//    a. Prepare for the sweep phase by setting gcphase to _GCoff,
//    setting up sweep state and disabling the write barrier.
//
//    b. Start the world. From this point on, newly allocated objects
//    are white, and allocating sweeps spans before use if necessary.
//
//    c. GC does concurrent sweeping in the background and in response
//    to allocation. See description below.
//
// 6. When sufficient allocation has taken place, replay the sequence
// starting with 1 above. See discussion of GC rate below.

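// Write barrier (sketch).
//
// For illustration only: the write barrier described in step 2b above
// behaves like the following pseudocode. This is a sketch, not the
// real implementation, which lives in mbarrier.go:
//
//	writePointer(slot, ptr):
//		shade(*slot) // shade the overwritten pointer
//		shade(ptr)   // shade the new pointer value
//		*slot = ptr
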
// Concurrent sweep.
//
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// At the end of STW mark termination all spans are marked as "needs sweeping".
//
// The background sweeper goroutine simply sweeps spans one-by-one.
//
// To avoid requesting more OS memory while there are unswept spans, when a
// goroutine needs another span, it first attempts to reclaim that much memory
// by sweeping. When a goroutine needs to allocate a new small-object span, it
// sweeps small-object spans for the same object size until it frees at least
// one object. When a goroutine needs to allocate a large-object span from the
// heap, it sweeps spans until it frees at least that many pages into the heap.
// There is one case where this may not suffice: if a goroutine sweeps and
// frees two nonadjacent one-page spans to the heap, it will allocate a new
// two-page span, but there can still be other one-page unswept spans which
// could be combined into a two-page span.
//
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in the GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).
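//
// As an illustration (a sketch only; enoughReclaimed is a hypothetical
// helper, while gosweepone is the real sweeping entry point used by
// gcStart below), "sweep before asking for more memory" has this shape:
//
//	for !enoughReclaimed() {
//		if gosweepone() == ^uintptr(0) {
//			break // no unswept spans remain
//		}
//	}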

// GC rate.
// Next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by the GOGC
// environment variable (100 by default). If GOGC=100 and we're using 4M,
// we'll GC again when we get to 8M (this mark is tracked in the next_gc
// variable). This keeps the GC cost in linear proportion to the allocation
// cost. Adjusting GOGC just changes the linear constant (and also the
// amount of extra memory used).
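//
// For example (a sketch of the arithmetic; startCycle below computes
// next_gc = heap_marked + heap_marked*GOGC/100):
//
//	GOGC=100, heap_marked=4M -> next_gc = 8M
//	GOGC=50,  heap_marked=4M -> next_gc = 6M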

// Oblets
//
// In order to prevent long pauses while scanning large objects and to
// improve parallelism, the garbage collector breaks up scan jobs for
// objects larger than maxObletBytes into "oblets" of at most
// maxObletBytes. When scanning encounters the beginning of a large
// object, it scans only the first oblet and enqueues the remaining
// oblets as new scan jobs.
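//
// A sketch of that split (illustrative; gcw.put enqueues a pointer as
// a new scan job, and the variable names here are hypothetical):
//
//	if n > maxObletBytes {
//		// Enqueue all oblets past the first as separate jobs.
//		for oblet := b + maxObletBytes; oblet < b+n; oblet += maxObletBytes {
//			gcw.put(oblet)
//		}
//		n = maxObletBytes // scan only the first oblet now
//	}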

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	_DebugGC         = 0
	_ConcurrentSweep = true
	_FinBlockSize    = 4 * 1024

	// sweepMinHeapDistance is a lower bound on the heap distance
	// (in bytes) reserved for concurrent sweeping between GC
	// cycles. This will be scaled by gcpercent/100.
	sweepMinHeapDistance = 1024 * 1024
)

// heapminimum is the minimum heap size at which to trigger GC.
// For small heaps, this overrides the usual GOGC*live set rule.
//
// When there is a very small live set but a lot of allocation, simply
// collecting when the heap reaches GOGC*live results in many GC
// cycles and high total per-GC overhead. This minimum amortizes this
// per-GC overhead while keeping the heap reasonably small.
//
// During initialization this is set to 4MB*GOGC/100. In the case of
// GOGC==0, this will set heapminimum to 0, resulting in constant
// collection even when the heap size is small, which is useful for
// debugging.
var heapminimum uint64 = defaultHeapMinimum

// defaultHeapMinimum is the value of heapminimum for GOGC==100.
const defaultHeapMinimum = 4 << 20
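
// For example, setGCPercent below computes heapminimum as
// defaultHeapMinimum*GOGC/100, so:
//
//	GOGC=100 -> heapminimum = 4MB
//	GOGC=50  -> heapminimum = 2MB
//	GOGC=0   -> heapminimum = 0 (constant collection)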

// Initialized from $GOGC. GOGC=off means no GC.
var gcpercent int32

func gcinit() {
	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
		throw("size of Workbuf is suboptimal")
	}

	_ = setGCPercent(readgogc())
	memstats.gc_trigger = heapminimum
	// Compute the goal heap size based on the trigger:
	//   trigger = marked * (1 + triggerRatio)
	//   marked = trigger / (1 + triggerRatio)
	//   goal = marked * (1 + GOGC/100)
	//        = trigger / (1 + triggerRatio) * (1 + GOGC/100)
	memstats.next_gc = uint64(float64(memstats.gc_trigger) / (1 + gcController.triggerRatio) * (1 + float64(gcpercent)/100))
	if gcpercent < 0 {
		memstats.next_gc = ^uint64(0)
	}
	work.startSema = 1
	work.markDoneSema = 1
}

func readgogc() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}

// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine and enables GC.
func gcenable() {
	c := make(chan int, 1)
	go bgsweep(c)
	<-c
	memstats.enablegc = true // now that runtime is initialized, GC is okay
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	lock(&mheap_.lock)
	out = gcpercent
	if in < 0 {
		in = -1
	}
	gcpercent = in
	heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
	if gcController.triggerRatio > float64(gcpercent)/100 {
		gcController.triggerRatio = float64(gcpercent) / 100
	}
	// This is either in gcinit or followed by a STW GC, both of
	// which will reset other stats like memstats.gc_trigger and
	// memstats.next_gc to appropriate values.
	unlock(&mheap_.lock)
	return out
}
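
// Callers normally reach setGCPercent through runtime/debug. A usage
// sketch:
//
//	old := debug.SetGCPercent(200) // goal heap becomes 3x the live set
//	defer debug.SetGCPercent(old)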

// Garbage collector phase.
// Indicates to the write barrier and synchronization task what to perform.
var gcphase uint32

// The compiler knows about this variable.
// If you change it, you must change the compiler too.
var writeBarrier struct {
	enabled bool    // compiler emits a check of this before calling write barrier
	pad     [3]byte // compiler uses 32-bit load for "enabled" field
	needed  bool    // whether we need a write barrier for current GC phase
	cgo     bool    // whether we need a write barrier for a cgo check
	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
}

// gcBlackenEnabled is 1 if mutator assists and background mark
// workers are allowed to blacken objects. This must only be set when
// gcphase == _GCmark.
var gcBlackenEnabled uint32

// gcBlackenPromptly indicates that optimizations that may
// hide work from the global work queue should be disabled.
//
// If gcBlackenPromptly is true, per-P gcWork caches should
// be flushed immediately and new objects should be allocated black.
//
// There is a tension between allocating objects white and
// allocating them black. If white and the objects die before being
// marked they can be collected during this GC cycle. On the other
// hand allocating them black will reduce _GCmarktermination latency
// since more work is done in the mark phase. This tension is resolved
// by allocating white until the mark phase is approaching its end and
// then allocating black for the remainder of the mark phase.
var gcBlackenPromptly bool

const (
	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
	_GCmark                   // GC marking roots and workbufs: allocate black, write barrier ENABLED
	_GCmarktermination        // GC mark termination: allocate black, Ps help GC, write barrier ENABLED
)

//go:nosplit
func setGCPhase(x uint32) {
	atomic.Store(&gcphase, x)
	writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
	writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
}
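
// Conceptually, the check the compiler emits around a pointer write
// looks like this sketch (the emitted code and the barrier entry point
// in mbarrier.go differ in detail):
//
//	if writeBarrier.enabled {
//		writebarrierptr(slot, ptr)
//	} else {
//		*slot = ptr
//	}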

// gcMarkWorkerMode represents the mode that a concurrent mark worker
// should operate in.
//
// Concurrent marking happens through four different mechanisms. One
// is mutator assists, which happen in response to allocations and are
// not scheduled. The other three are variations in the per-P mark
// workers and are distinguished by gcMarkWorkerMode.
type gcMarkWorkerMode int

const (
	// gcMarkWorkerDedicatedMode indicates that the P of a mark
	// worker is dedicated to running that mark worker. The mark
	// worker should run without preemption.
	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota

	// gcMarkWorkerFractionalMode indicates that a P is currently
	// running the "fractional" mark worker. The fractional worker
	// is necessary when GOMAXPROCS*gcGoalUtilization is not an
	// integer. The fractional worker should run until it is
	// preempted and will be scheduled to pick up the fractional
	// part of GOMAXPROCS*gcGoalUtilization.
	gcMarkWorkerFractionalMode

	// gcMarkWorkerIdleMode indicates that a P is running the mark
	// worker because it has nothing else to do. The idle worker
	// should run until it is preempted and account its time
	// against gcController.idleMarkTime.
	gcMarkWorkerIdleMode
)
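
// For example, with gcGoalUtilization = 0.25 and GOMAXPROCS = 6,
// startCycle below computes:
//
//	totalUtilizationGoal       = 6 * 0.25 = 1.5
//	dedicatedMarkWorkersNeeded = 1
//	fractionalUtilizationGoal  = 0.5
//
// so one P is always a dedicated mark worker and some P runs the
// fractional worker half of the time.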

// gcMarkWorkerModeStrings are the string labels of gcMarkWorkerModes
// to use in execution traces.
var gcMarkWorkerModeStrings = [...]string{
	"GC (dedicated)",
	"GC (fractional)",
	"GC (idle)",
}

// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It uses a feedback control algorithm to adjust the memstats.gc_trigger
// trigger based on the heap growth and GC CPU utilization each cycle.
// This algorithm optimizes for heap growth to match GOGC and for CPU
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at https://golang.org/s/go15gcpacing.
var gcController = gcControllerState{
	// Initial trigger ratio guess.
	triggerRatio: 7 / 8.0,
}

type gcControllerState struct {
	// scanWork is the total scan work performed this cycle. This
	// is updated atomically during the cycle. Updates occur in
	// bounded batches, since it is both written and read
	// throughout the cycle. At the end of the cycle, this is how
	// much of the retained heap is scannable.
	//
	// Currently this is the bytes of heap scanned. For most uses,
	// this is an opaque unit of work, but for estimation the
	// definition is important.
	scanWork int64

	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically. Updates
	// occur in bounded batches, since it is both written and read
	// throughout the cycle.
	assistTime int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64

	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64

	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heap_scan is updated.
	assistWorkPerByte float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	assistBytesPerWork float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker.
	// For example, if the overall mark utilization goal is 25%
	// and GOMAXPROCS is 6, one P will be a dedicated mark worker
	// and this will be set to 0.5 so that 50% of the time some P
	// is in a fractional mark worker. This is computed at the
	// beginning of each cycle.
	fractionalUtilizationGoal float64

	// triggerRatio is the heap growth ratio at which the garbage
	// collection cycle should start. E.g., if this is 0.6, then
	// GC should start when the live heap has reached 1.6 times
	// the heap size marked by the previous cycle. This should be
	// ≤ GOGC/100 so the trigger heap size is less than the goal
	// heap size. This is updated at the end of each cycle.
	triggerRatio float64

	_ [sys.CacheLineSize]byte

	// fractionalMarkWorkersNeeded is the number of fractional
	// mark workers that need to be started. This is either 0 or
	// 1. This is potentially updated atomically at every
	// scheduling point (hence it gets its own cache line).
	fractionalMarkWorkersNeeded int64

	_ [sys.CacheLineSize]byte
}

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema.
func (c *gcControllerState) startCycle() {
	c.scanWork = 0
	c.bgScanCredit = 0
	c.assistTime = 0
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0

	// If this is the first GC cycle or we're operating on a very
	// small heap, fake heap_marked so it looks like gc_trigger is
	// the appropriate growth from heap_marked, even though the
	// real heap_marked may not have a meaningful value (on the
	// first cycle) or may be much smaller (resulting in a large
	// error response).
	if memstats.gc_trigger <= heapminimum {
		memstats.heap_marked = uint64(float64(memstats.gc_trigger) / (1 + c.triggerRatio))
	}

	// Re-compute the heap goal for this cycle in case something
	// changed. This is the same calculation we use elsewhere.
	memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
	if gcpercent < 0 {
		memstats.next_gc = ^uint64(0)
	}

	// Ensure that the heap goal is at least a little larger than
	// the current live heap size. This may not be the case if GC
	// start is delayed or if the allocation that pushed heap_live
	// over gc_trigger is large or if the trigger is really close to
	// GOGC. Assist is proportional to this distance, so enforce a
	// minimum distance, even if it means going over the GOGC goal
	// by a tiny bit.
	if memstats.next_gc < memstats.heap_live+1024*1024 {
		memstats.next_gc = memstats.heap_live + 1024*1024
	}

	// Compute the total mark utilization goal and divide it among
	// dedicated and fractional workers.
	totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal)
	c.fractionalUtilizationGoal = totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)
	if c.fractionalUtilizationGoal > 0 {
		c.fractionalMarkWorkersNeeded = 1
	} else {
		c.fractionalMarkWorkersNeeded = 0
	}

	// Clear per-P state
	for _, p := range &allp {
		if p == nil {
			break
		}
		p.gcAssistTime = 0
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	if debug.gcpacertrace > 0 {
		print("pacer: assist ratio=", c.assistWorkPerByte,
			" (scan ", memstats.heap_scan>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			memstats.next_gc>>20, " MB)",
			" workers=", c.dedicatedMarkWorkersNeeded,
			"+", c.fractionalMarkWorkersNeeded, "\n")
	}
}

// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called either under STW or
// whenever memstats.heap_scan or memstats.heap_live is updated (with
// mheap_.lock held).
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
//
// TODO: Consider removing the periodic controller update altogether.
// Since we switched to allocating black, in theory we shouldn't have
// to change the assist ratio. However, this is still a useful hook
// that we've found many uses for when experimenting.
func (c *gcControllerState) revise() {
	// Compute the expected scan work remaining.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heap_scan) and scan work completed
	// (scanWork), so this difference won't be changed by
	// allocations during GC.
	//
	// This particular estimate is a strict upper bound on the
	// possible remaining scan work for the current heap.
	// You might consider dividing this by 2 (or by
	// (100+GOGC)/100) to counter this over-estimation, but
	// benchmarks show that this has almost no effect on mean
	// mutator utilization, heap size, or assist time and it
	// introduces the danger of under-estimating and letting the
	// mutator outpace the garbage collector.
	scanWorkExpected := int64(memstats.heap_scan) - c.scanWork
	if scanWorkExpected < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the expected scan work
		// negative.
		scanWorkExpected = 1000
	}

	// Compute the heap distance remaining.
	heapDistance := int64(memstats.next_gc) - int64(memstats.heap_live)
	if heapDistance <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapDistance = 1
	}

	// Compute the mutator assist ratio so by the time the mutator
	// allocates the remaining heap bytes up to next_gc, it will
	// have done (or stolen) the remaining amount of scan work.
	c.assistWorkPerByte = float64(scanWorkExpected) / float64(heapDistance)
	c.assistBytesPerWork = float64(heapDistance) / float64(scanWorkExpected)
}
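
// As a worked example (illustrative numbers): with 64MB of scannable
// heap (heap_scan), 16MB of scan work already done, and 24MB of heap
// distance left before next_gc:
//
//	scanWorkExpected  = 64M - 16M = 48M
//	assistWorkPerByte = 48M / 24M = 2.0
//
// so an assist must perform (or steal credit for) two bytes of scan
// work for every byte it allocates.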

// endCycle updates the GC controller state at the end of the
// concurrent part of the GC cycle.
func (c *gcControllerState) endCycle() {
	h_t := c.triggerRatio // For debugging

	// Proportional response gain for the trigger controller. Must
	// be in [0, 1]. Lower values smooth out transient effects but
	// take longer to respond to phase changes. Higher values
	// react to phase changes quickly, but are more affected by
	// transient changes. Values near 1 may be unstable.
	const triggerGain = 0.5

	// Compute next cycle trigger ratio. First, this computes the
	// "error" for this cycle; that is, how far off the trigger
	// was from what it should have been, accounting for both heap
	// growth and GC CPU utilization. We compute the actual heap
	// growth during this cycle and scale that by how far off from
	// the goal CPU utilization we were (to estimate the heap
	// growth if we had the desired CPU utilization). The
	// difference between this estimate and the GOGC-based goal
	// heap growth is the error.
	goalGrowthRatio := float64(gcpercent) / 100
	actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
	assistDuration := nanotime() - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcGoalUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
	}

	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)

	// Finally, we adjust the trigger for next time by this error,
	// damped by the proportional gain.
	c.triggerRatio += triggerGain * triggerError
	if c.triggerRatio < 0 {
		// This can happen if the mutator is allocating very
		// quickly or the GC is scanning very slowly.
		c.triggerRatio = 0
	} else if c.triggerRatio > goalGrowthRatio*0.95 {
		// Ensure there's always a little margin so that the
		// mutator assist ratio isn't infinity.
		c.triggerRatio = goalGrowthRatio * 0.95
	}

	if debug.gcpacertrace > 0 {
		// Print controller state in terms of the design
		// document.
		H_m_prev := memstats.heap_marked
		H_T := memstats.gc_trigger
		h_a := actualGrowthRatio
		H_a := memstats.heap_live
		h_g := goalGrowthRatio
		H_g := int64(float64(H_m_prev) * (1 + h_g))
		u_a := utilization
		u_g := gcGoalUtilization
		W_a := c.scanWork
		print("pacer: H_m_prev=", H_m_prev,
			" h_t=", h_t, " H_T=", H_T,
			" h_a=", h_a, " H_a=", H_a,
			" h_g=", h_g, " H_g=", H_g,
			" u_a=", u_a, " u_g=", u_g,
			" W_a=", W_a,
			" goal=", goalGrowthRatio-h_t,
			" actual=", h_a-h_t,
			" u_a/u_g=", u_a/u_g,
			"\n")
	}
}
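
// As a worked example (illustrative numbers): with GOGC=100
// (goalGrowthRatio = 1.0), triggerRatio = 0.7, an actual growth of
// 0.9, and utilization exactly at gcGoalUtilization:
//
//	triggerError = 1.0 - 0.7 - 1.0*(0.9 - 0.7) = 0.1
//	triggerRatio = 0.7 + 0.5*0.1 = 0.75
//
// so the next cycle triggers a little later in the heap's growth.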

// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	// If there are idle Ps, wake one so it will run an idle worker.
	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
	//
	//	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
	//		wakep()
	//		return
	//	}

	// There are no idle Ps. If we need more dedicated workers,
	// try to preempt a running P so it will switch to a worker.
	if c.dedicatedMarkWorkersNeeded <= 0 {
		return
	}
	// Pick a random other P to preempt.
	if gomaxprocs <= 1 {
		return
	}
	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	myID := gp.m.p.ptr().id
	for tries := 0; tries < 5; tries++ {
		id := int32(fastrand() % uint32(gomaxprocs-1))
		if id >= myID {
			id++
		}
		p := allp[id]
		if p.status != _Prunning {
			continue
		}
		if preemptone(p) {
			return
		}
	}
}

// findRunnableGCWorker returns the background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}
	if _p_.gcBgMarkWorker == 0 {
		// The mark worker associated with this P is blocked
		// performing a mark transition. We can't run it
		// because it may be on some other run or wait queue.
		return nil
	}

	if !gcMarkWorkAvailable(_p_) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil
	}

	decIfPositive := func(ptr *int64) bool {
		if *ptr > 0 {
			if atomic.Xaddint64(ptr, -1) >= 0 {
				return true
			}
			// We lost a race
			atomic.Xaddint64(ptr, +1)
		}
		return false
	}

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
		// TODO(austin): This P isn't going to run anything
		// else for a while, so kick everything out of its run
		// queue.
	} else {
		if !decIfPositive(&c.fractionalMarkWorkersNeeded) {
			// No more workers are needed right now.
			return nil
		}

		// This P has picked the token for the fractional worker.
		// Is the GC currently under or at the utilization goal?
		// If so, do more work.
		//
		// We used to check whether doing one time slice of work
		// would remain under the utilization goal, but that has the
		// effect of delaying work until the mutator has run for
		// enough time slices to pay for the work. During those time
		// slices, write barriers are enabled, so the mutator is running slower.
		// Now instead we do the work whenever we're under or at the
		// utilization goal and pay for it by letting the mutator run later.
		// This doesn't change the overall utilization averages, but it
		// front loads the GC work so that the GC finishes earlier and
		// write barriers can be turned off sooner, effectively giving
		// the mutator a faster machine.
		//
		// The old, slower behavior can be restored by setting
		//	gcForcePreemptNS = forcePreemptNS.
		const gcForcePreemptNS = 0

		// TODO(austin): We could fast path this and basically
		// eliminate contention on c.fractionalMarkWorkersNeeded by
		// precomputing the minimum time at which it's worth
		// next scheduling the fractional worker. Then Ps
		// don't have to fight in the window where we've
		// passed that deadline and no one has started the
		// worker yet.
		//
		// TODO(austin): Shorter preemption interval for mark
		// worker to improve fairness and give this
		// finer-grained control over schedule?
		now := nanotime() - gcController.markStartTime
		then := now + gcForcePreemptNS
		timeUsed := c.fractionalMarkTime + gcForcePreemptNS
		if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
			// Nope, we'd overshoot the utilization goal
			atomic.Xaddint64(&c.fractionalMarkWorkersNeeded, +1)
			return nil
		}
		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker
	gp := _p_.gcBgMarkWorker.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.enabled {
		traceGoUnpark(gp, 0)
	}
	return gp
}

// gcGoalUtilization is the goal CPU utilization for background
// marking as a fraction of GOMAXPROCS.
const gcGoalUtilization = 0.25

// gcCreditSlack is the amount of scan work credit that can
// accumulate locally before updating gcController.scanWork and,
// optionally, gcController.bgScanCredit. Lower values give a more
// accurate assist ratio and make it more likely that assists will
// successfully steal background credit. Higher values reduce memory
// contention.
const gcCreditSlack = 2000

// gcAssistTimeSlack is the nanoseconds of mutator assist time that
// can accumulate on a P before updating gcController.assistTime.
const gcAssistTimeSlack = 5000

// gcOverAssistWork determines how many extra units of scan work a GC
// assist does when an assist happens. This amortizes the cost of an
// assist by pre-paying for this many bytes of future allocations.
const gcOverAssistWork = 64 << 10

var work struct {
	full  uint64                   // lock-free list of full blocks workbuf
	empty uint64                   // lock-free list of empty blocks workbuf
	pad0  [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait

	// bytesMarked is the number of bytes marked this cycle. This
	// includes bytes blackened in scanned objects, noscan objects
	// that go straight to black, and permagrey objects scanned by
	// markroot during the concurrent scan phase. This is updated
	// atomically during the cycle. Updates may be batched
	// arbitrarily, since the value is only read at the end of the
	// cycle.
	//
	// Because of benign races during marking, this number may not
	// be the exact number of marked bytes, but it should be very
	// close.
	//
	// Put this field here because it needs 64-bit atomic access
	// (and thus 8-byte alignment even on 32-bit architectures).
	bytesMarked uint64

	markrootNext uint32 // next markroot job
	markrootJobs uint32 // number of markroot jobs

	nproc   uint32
	tstart  int64
	nwait   uint32
	ndone   uint32
	alldone note

	// helperDrainBlock indicates that GC mark termination helpers
	// should pass gcDrainBlock to gcDrain to block in the
	// getfull() barrier. Otherwise, they should pass gcDrainNoBlock.
	//
	// TODO: This is a temporary fallback to support
	// debug.gcrescanstacks > 0 and to work around some known
	// races. Remove this when we remove the debug option and fix
	// the races.
	helperDrainBlock bool

	// Number of roots of various root types. Set by gcMarkRootPrepare.
	nFlushCacheRoots                                             int
	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nRescanRoots int

	// markrootDone indicates that roots have been marked at least
	// once during the current GC cycle. This is checked by root
	// marking operations that have to happen only during the
	// first root marking pass, whether that's during the
	// concurrent mark phase in current GC or mark termination in
	// STW GC.
	markrootDone bool

	// Each type of GC state transition is protected by a lock.
	// Since multiple threads can simultaneously detect the state
	// transition condition, any thread that detects a transition
	// condition must acquire the appropriate transition lock,
	// re-check the transition condition and return if it no
	// longer holds or perform the transition if it does.
	// Likewise, any transition must invalidate the transition
	// condition before releasing the lock. This ensures that each
	// transition is performed by exactly one thread and threads
	// that need the transition to happen block until it has
	// happened.
	//
	// startSema protects the transition from "off" to mark or
	// mark termination.
	startSema uint32
	// markDoneSema protects transitions from mark 1 to mark 2 and
	// from mark 2 to mark termination.
	markDoneSema uint32

	// Background mark completion signaling
	bgMarkReady note   // signal background mark worker has started
	bgMarkDone  uint32 // cas to 1 when at a background mark completion point

	// mode is the concurrency mode of the current GC cycle.
	mode gcMode

	// totaltime is the CPU nanoseconds spent in GC since the
	// program started if debug.gctrace > 0.
	totaltime int64

	// initialHeapLive is the value of memstats.heap_live at the
	// beginning of this GC cycle.
	initialHeapLive uint64

	// assistQueue is a queue of assists that are blocked because
	// there was neither enough credit to steal nor enough work to
	// do.
	assistQueue struct {
		lock       mutex
		head, tail guintptr
	}

	// rescan is a list of G's that need to be rescanned during
	// mark termination. A G adds itself to this list when it
	// first invalidates its stack scan.
	rescan struct {
		lock mutex
		list []guintptr
	}

	// Timing/utilization stats for this cycle.
	stwprocs, maxprocs                 int32
	tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start

	pauseNS    int64 // total STW time this cycle
	pauseStart int64 // nanotime() of last STW

	// debug.gctrace heap sizes for this cycle.
	heap0, heap1, heap2, heapGoal uint64
}

// GC runs a garbage collection and blocks the caller until the
// garbage collection is complete. It may also block the entire
// program.
func GC() {
	gcStart(gcForceBlockMode, false)
}

// gcMode indicates how concurrent a GC cycle should be.
type gcMode int

const (
	gcBackgroundMode gcMode = iota // concurrent GC and sweep
	gcForceMode                    // stop-the-world GC now, concurrent sweep
	gcForceBlockMode               // stop-the-world GC now and STW sweep (forced by user)
)

// gcShouldStart returns true if the exit condition for the _GCoff
// phase has been met. The exit condition should be tested when
// allocating.
//
// If forceTrigger is true, it ignores the current heap size, but
// checks all other conditions. In general this should be false.
func gcShouldStart(forceTrigger bool) bool {
	return gcphase == _GCoff && (forceTrigger || memstats.heap_live >= memstats.gc_trigger) && memstats.enablegc && panicking == 0 && gcpercent >= 0
}

// gcStart transitions the GC from _GCoff to _GCmark (if mode ==
// gcBackgroundMode) or _GCmarktermination (if mode !=
// gcBackgroundMode) by performing sweep termination and GC
// initialization.
//
// This may return without performing this transition in some cases,
// such as when called on a system stack or with locks held.
func gcStart(mode gcMode, forceTrigger bool) {
	// Since this is called from malloc and malloc is called in
	// the guts of a number of libraries that might be holding
	// locks, don't attempt to start GC in non-preemptible or
	// potentially unstable situations.
	mp := acquirem()
	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
		releasem(mp)
		return
	}
	releasem(mp)
	mp = nil

	// Pick up the remaining unswept/not being swept spans concurrently
	//
	// This shouldn't happen if we're being invoked in background
	// mode since proportional sweep should have just finished
	// sweeping everything, but rounding errors, etc, may leave a
	// few spans unswept. In forced mode, this is necessary since
	// GC can be forced at any point in the sweeping cycle.
	//
	// We check the transition condition continuously here in case
	// this G gets delayed into the next GC cycle.
	for (mode != gcBackgroundMode || gcShouldStart(forceTrigger)) && gosweepone() != ^uintptr(0) {
		sweep.nbgsweep++
	}

	// Perform GC initialization and the sweep termination
	// transition.
	//
	// If this is a forced GC, don't acquire the transition lock
	// or re-check the transition condition because we
	// specifically *don't* want to share the transition with
	// another thread.
	useStartSema := mode == gcBackgroundMode
	if useStartSema {
		semacquire(&work.startSema, 0)
		// Re-check transition condition under transition lock.
		if !gcShouldStart(forceTrigger) {
			semrelease(&work.startSema)
			return
		}
	}

	// For stats, check if this GC was forced by the user.
	forced := mode != gcBackgroundMode

	// In gcstoptheworld debug mode, upgrade the mode accordingly.
	// We do this after re-checking the transition condition so
	// that multiple goroutines that detect the heap trigger don't
	// start multiple STW GCs.
	if mode == gcBackgroundMode {
		if debug.gcstoptheworld == 1 {
			mode = gcForceMode
		} else if debug.gcstoptheworld == 2 {
			mode = gcForceBlockMode
		}
	}

	// Ok, we're doing it! Stop everybody else.
	semacquire(&worldsema, 0)

	if trace.enabled {
		traceGCStart()
	}

	if mode == gcBackgroundMode {
		gcBgMarkStartWorkers()
	}

	gcResetMarkState()

	now := nanotime()
	work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
	work.tSweepTerm = now
	work.heap0 = memstats.heap_live
	work.pauseNS = 0
	work.mode = mode

	work.pauseStart = now
	systemstack(stopTheWorldWithSema)
	// Finish sweep before we start concurrent scan.
	systemstack(func() {
		finishsweep_m()
	})
	// clearpools before we start the GC. If we wait, the memory will not be
	// reclaimed until the next GC cycle.
	clearpools()

	if mode == gcBackgroundMode { // Do as much work concurrently as possible
		gcController.startCycle()
		work.heapGoal = memstats.next_gc

		// Enter concurrent mark phase and enable
		// write barriers.
		//
		// Because the world is stopped, all Ps will
		// observe that write barriers are enabled by
		// the time we start the world and begin
		// scanning.
		//
		// It's necessary to enable write barriers
		// during the scan phase for several reasons:
		//
		// They must be enabled for writes to higher
		// stack frames before we scan stacks and
		// install stack barriers because this is how
		// we track writes to inactive stack frames.
		// (Alternatively, we could not install stack
		// barriers over frame boundaries with
		// up-pointers).
		//
		// They must be enabled before assists are
		// enabled because they must be enabled before
		// any non-leaf heap objects are marked. Since
		// allocations are blocked until assists can
		// happen, we want to enable assists as early as
		// possible.
		setGCPhase(_GCmark)

		gcBgMarkPrepare() // Must happen before assist enable.
		gcMarkRootPrepare()

		// Mark all active tinyalloc blocks. Since we're
		// allocating from these, they need to be black like
		// other allocations. The alternative is to blacken
		// the tiny block on every allocation from it, which
		// would slow down the tiny allocator.
		gcMarkTinyAllocs()

		// At this point all Ps have enabled the write
		// barrier, thus maintaining the no white to
		// black invariant. Enable mutator assists to
		// put back-pressure on fast allocating
		// mutators.
		atomic.Store(&gcBlackenEnabled, 1)

		// Assists and workers can start the moment we start
		// the world.
		gcController.markStartTime = now

		// Concurrent mark.
		systemstack(startTheWorldWithSema)
		now = nanotime()
		work.pauseNS += now - work.pauseStart
		work.tMark = now
	} else {
		t := nanotime()
		work.tMark, work.tMarkTerm = t, t
		work.heapGoal = work.heap0

		if forced {
			memstats.numforcedgc++
		}

		// Perform mark termination. This will restart the world.
		gcMarkTermination()
	}

	if useStartSema {
		semrelease(&work.startSema)
	}
}

// gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2
// to mark termination.
//
// This should be called when all mark work has been drained. In mark
// 1, this includes all root marking jobs, global work buffers, and
// active work buffers in assists and background workers; however,
// work may still be cached in per-P work buffers. In mark 2, per-P
// caches are disabled.
//
// The calling context must be preemptible.
//
// Note that it is explicitly okay to have write barriers in this
// function because completion of concurrent mark is best-effort
// anyway. Any work created by write barriers here will be cleaned up
// by mark termination.
func gcMarkDone() {
top:
	semacquire(&work.markDoneSema, 0)

	// Re-check transition condition under transition lock.
	if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
		semrelease(&work.markDoneSema)
		return
	}

	// Disallow starting new workers so that any remaining workers
	// in the current mark phase will drain out.
	//
	// TODO(austin): Should dedicated workers keep an eye on this
	// and exit gcDrain promptly?
	atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff)
	atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff)

	if !gcBlackenPromptly {
		// Transition from mark 1 to mark 2.
		//
		// The global work list is empty, but there can still be work
		// sitting in the per-P work caches.
		// Flush and disable work caches.

		// Disallow caching workbufs and indicate that we're in mark 2.
		gcBlackenPromptly = true

		// Prevent completion of mark 2 until we've flushed
		// cached workbufs.
		atomic.Xadd(&work.nwait, -1)

		// GC is set up for mark 2. Let Gs blocked on the
		// transition lock go while we flush caches.
		semrelease(&work.markDoneSema)

		systemstack(func() {
			// Flush all currently cached workbufs and
			// ensure all Ps see gcBlackenPromptly. This
			// also blocks until any remaining mark 1
			// workers have exited their loop so we can
			// start new mark 2 workers.
			forEachP(func(_p_ *p) {
				_p_.gcw.dispose()
			})
		})

		// Check that roots are marked. We should be able to
		// do this before the forEachP, but based on issue
		// #16083 there may be a (harmless) race where we can
		// enter mark 2 while some workers are still scanning
		// stacks. The forEachP ensures these scans are done.
		//
		// TODO(austin): Figure out the race and fix this
		// properly.
		gcMarkRootCheck()

		// Now we can start up mark 2 workers.
		atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff)
		atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff)

		incnwait := atomic.Xadd(&work.nwait, +1)
		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
			// This loop will make progress because
			// gcBlackenPromptly is now true, so it won't
			// take this same "if" branch.
			goto top
		}
	} else {
		// Transition to mark termination.
		now := nanotime()
		work.tMarkTerm = now
		work.pauseStart = now
		getg().m.preemptoff = "gcing"
		systemstack(stopTheWorldWithSema)
		// The gcphase is _GCmark; it will transition to _GCmarktermination
		// below. The important thing is that the write barrier remains active
		// until all marking is complete. This includes writes made by the GC.

		// Record that one root marking pass has completed.
		work.markrootDone = true

		// Disable assists and background workers. We must do
		// this before waking blocked assists.
		atomic.Store(&gcBlackenEnabled, 0)

		// Wake all blocked assists. These will run when we
		// start the world again.
		gcWakeAllAssists()

		// Likewise, release the transition lock. Blocked
		// workers and assists will run when we start the
		// world again.
		semrelease(&work.markDoneSema)

		// endCycle depends on all gcWork cache stats being
		// flushed. This is ensured by mark 2.
		gcController.endCycle()

		// Perform mark termination. This will restart the world.
		gcMarkTermination()
	}
}

func gcMarkTermination() {
	// World is stopped.
	// Start marktermination which includes enabling the write barrier.
	atomic.Store(&gcBlackenEnabled, 0)
	gcBlackenPromptly = false
	setGCPhase(_GCmarktermination)

	work.heap1 = memstats.heap_live
	startTime := nanotime()

	mp := acquirem()
	mp.preemptoff = "gcing"
	_g_ := getg()
	_g_.m.traceback = 2
	gp := _g_.m.curg
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "garbage collection"

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). We also
	// need to switch to g0 so we can shrink the stack.
	systemstack(func() {
		gcMark(startTime)
		// Must return immediately.
		// The outer function's stack may have moved
		// during gcMark (it shrinks stacks, including the
		// outer function's stack), so we must not refer
		// to any of its variables. Return back to the
		// non-system stack to pick up the new addresses
		// before continuing.
	})

	systemstack(func() {
		work.heap2 = work.bytesMarked
		if debug.gccheckmark > 0 {
			// Run a full stop-the-world mark using checkmark bits,
			// to check that we didn't forget to mark anything during
			// the concurrent mark process.
			gcResetMarkState()
			initCheckmarks()
			gcMark(startTime)
			clearCheckmarks()
		}

		// marking is complete so we can turn the write barrier off
		setGCPhase(_GCoff)
		gcSweep(work.mode)

		if debug.gctrace > 1 {
			startTime = nanotime()
			// The g stacks have been scanned so
			// they have gcscanvalid==true and gcworkdone==true.
			// Reset these so that all stacks will be rescanned.
			gcResetMarkState()
			finishsweep_m()

			// Still in STW but gcphase is _GCoff, reset to _GCmarktermination
			// At this point all objects will be found during the gcMark which
			// does a complete STW mark and object scan.
			setGCPhase(_GCmarktermination)
			gcMark(startTime)
			setGCPhase(_GCoff) // marking is done, turn off wb.
			gcSweep(work.mode)
		}
	})

	_g_.m.traceback = 0
	casgstatus(gp, _Gwaiting, _Grunning)

	if trace.enabled {
		traceGCDone()
	}

	// all done
	mp.preemptoff = ""

	if gcphase != _GCoff {
		throw("gc done but gcphase != _GCoff")
	}

	// Update timing memstats
	now, unixNow := nanotime(), unixnanotime()
	work.pauseNS += now - work.pauseStart
	work.tEnd = now
	atomic.Store64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user
	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
	memstats.pause_total_ns += uint64(work.pauseNS)

	// Update work.totaltime.
	sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
	// We report idle marking time below, but omit it from the
	// overall utilization here since it's "free".
	markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
	markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
	cycleCpu := sweepTermCpu + markCpu + markTermCpu
	work.totaltime += cycleCpu

	// Compute overall GC CPU utilization.
	totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
	memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)

	memstats.numgc++

	// Reset sweep state.
	sweep.nbgsweep = 0
	sweep.npausesweep = 0

	systemstack(startTheWorldWithSema)

	// Update heap profile stats if gcSweep didn't do it. This is
	// relatively expensive, so we don't want to do it while the
	// world is stopped, but it needs to happen ASAP after
	// starting the world to prevent too many allocations from the
	// next cycle leaking in. It must happen before releasing
	// worldsema since there are applications that do a
	// runtime.GC() to update the heap profile and then
	// immediately collect the profile.
	if _ConcurrentSweep && work.mode != gcForceBlockMode {
		mProf_GC()
	}

	// Free stack spans. This must be done between GC cycles.
	systemstack(freeStackSpans)

	// Best-effort remove stack barriers so they don't get in the
	// way of things like GDB and perf.
	lock(&allglock)
	myallgs := allgs
	unlock(&allglock)
	gcTryRemoveAllStackBarriers(myallgs)

	// Print gctrace before dropping worldsema. As soon as we drop
	// worldsema another cycle could start and smash the stats
	// we're trying to print.
	if debug.gctrace > 0 {
		util := int(memstats.gc_cpu_fraction * 100)

		var sbuf [24]byte
		printlock()
		print("gc ", memstats.numgc,
			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
			util, "%: ")
		prev := work.tSweepTerm
		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
			if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
			prev = ns
		}
		print(" ms clock, ")
		for i, ns := range []int64{sweepTermCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
   1368 			if i == 2 || i == 3 {
   1369 				// Separate mark time components with /.
   1370 				print("/")
   1371 			} else if i != 0 {
   1372 				print("+")
   1373 			}
   1374 			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
   1375 		}
   1376 		print(" ms cpu, ",
   1377 			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
   1378 			work.heapGoal>>20, " MB goal, ",
   1379 			work.maxprocs, " P")
   1380 		if work.mode != gcBackgroundMode {
   1381 			print(" (forced)")
   1382 		}
   1383 		print("\n")
   1384 		printunlock()
   1385 	}
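
         	// For reference, the line printed above has this shape
         	// (illustrative values, not real output):
         	//
         	//	gc 4 @1.204s 1%: 0.14+2.1+0.17 ms clock, 0.58+0.62/2.0/4.1+0.71 ms cpu, 4->4->2 MB, 5 MB goal, 4 P
         	//
         	// i.e. cycle number, seconds since program start, overall GC CPU
         	// utilization, wall-clock times for sweep termination, concurrent
         	// mark, and mark termination, CPU times for sweep termination,
         	// assists, dedicated/fractional/idle mark, and mark termination,
         	// the heap size when the cycle started, when mark termination
         	// started, and as marked (work.heap0/heap1/heap2), the heap goal,
         	// and the number of Ps.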
   1386 
   1387 	semrelease(&worldsema)
   1388 	// Careful: another GC cycle may start now.
   1389 
   1390 	releasem(mp)
   1391 	mp = nil
   1392 
   1393 	// now that gc is done, kick off finalizer thread if needed
    1394 	if !_ConcurrentSweep {
   1395 		// give the queued finalizers, if any, a chance to run
   1396 		Gosched()
   1397 	}
   1398 }
   1399 
   1400 // gcBgMarkStartWorkers prepares background mark worker goroutines.
   1401 // These goroutines will not run until the mark phase, but they must
    1402 // be started while the world is not stopped and from a regular G
   1403 // stack. The caller must hold worldsema.
   1404 func gcBgMarkStartWorkers() {
    1405 	// Background marking is performed by per-P Gs. Ensure that
   1406 	// each P has a background GC G.
   1407 	for _, p := range &allp {
   1408 		if p == nil || p.status == _Pdead {
   1409 			break
   1410 		}
   1411 		if p.gcBgMarkWorker == 0 {
   1412 			go gcBgMarkWorker(p)
   1413 			notetsleepg(&work.bgMarkReady, -1)
   1414 			noteclear(&work.bgMarkReady)
   1415 		}
   1416 	}
   1417 }
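
         // The bgMarkReady note above forms a one-shot rendezvous: each new
         // worker calls notewakeup once it has recorded itself (see
         // gcBgMarkWorker), and the loop blocks in notetsleepg and clears the
         // note before starting the next worker, so workers start strictly
         // one at a time. A rough user-level analogue (hypothetical helpers,
         // for illustration only; the runtime itself cannot use channels
         // here):
         //
         //	ready := make(chan struct{})
         //	for _, p := range ps {
         //		go func(p *P) {
         //			register(p)         // like setting p.gcBgMarkWorker
         //			ready <- struct{}{} // like notewakeup(&work.bgMarkReady)
         //		}(p)
         //		<-ready // like notetsleepg followed by noteclear
         //	}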
   1418 
   1419 // gcBgMarkPrepare sets up state for background marking.
   1420 // Mutator assists must not yet be enabled.
   1421 func gcBgMarkPrepare() {
   1422 	// Background marking will stop when the work queues are empty
   1423 	// and there are no more workers (note that, since this is
   1424 	// concurrent, this may be a transient state, but mark
   1425 	// termination will clean it up). Between background workers
   1426 	// and assists, we don't really know how many workers there
   1427 	// will be, so we pretend to have an arbitrarily large number
   1428 	// of workers, almost all of which are "waiting". While a
   1429 	// worker is working it decrements nwait. If nproc == nwait,
   1430 	// there are no workers.
   1431 	work.nproc = ^uint32(0)
   1432 	work.nwait = ^uint32(0)
   1433 }
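
         // To make the counting above concrete (illustrative arithmetic): both
         // counters start at ^uint32(0). A worker that begins draining does
         // atomic.Xadd(&work.nwait, -1), so nwait != nproc while it runs. When
         // it finishes, it does atomic.Xadd(&work.nwait, +1); if the result
         // equals work.nproc, it was the last running worker, and if
         // gcMarkWorkAvailable then reports no work, it signals completion via
         // gcMarkDone (see gcBgMarkWorker below).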
   1434 
   1435 func gcBgMarkWorker(_p_ *p) {
   1436 	gp := getg()
   1437 
   1438 	type parkInfo struct {
   1439 		m      muintptr // Release this m on park.
   1440 		attach puintptr // If non-nil, attach to this p on park.
   1441 	}
   1442 	// We pass park to a gopark unlock function, so it can't be on
   1443 	// the stack (see gopark). Prevent deadlock from recursively
   1444 	// starting GC by disabling preemption.
   1445 	gp.m.preemptoff = "GC worker init"
   1446 	park := new(parkInfo)
   1447 	gp.m.preemptoff = ""
   1448 
   1449 	park.m.set(acquirem())
   1450 	park.attach.set(_p_)
   1451 	// Inform gcBgMarkStartWorkers that this worker is ready.
   1452 	// After this point, the background mark worker is scheduled
   1453 	// cooperatively by gcController.findRunnable. Hence, it must
   1454 	// never be preempted, as this would put it into _Grunnable
   1455 	// and put it on a run queue. Instead, when the preempt flag
   1456 	// is set, this puts itself into _Gwaiting to be woken up by
   1457 	// gcController.findRunnable at the appropriate time.
   1458 	notewakeup(&work.bgMarkReady)
   1459 
   1460 	for {
   1461 		// Go to sleep until woken by gcController.findRunnable.
   1462 		// We can't releasem yet since even the call to gopark
   1463 		// may be preempted.
   1464 		gopark(func(g *g, parkp unsafe.Pointer) bool {
   1465 			park := (*parkInfo)(parkp)
   1466 
   1467 			// The worker G is no longer running, so it's
   1468 			// now safe to allow preemption.
   1469 			releasem(park.m.ptr())
   1470 
   1471 			// If the worker isn't attached to its P,
   1472 			// attach now. During initialization and after
   1473 			// a phase change, the worker may have been
   1474 			// running on a different P. As soon as we
   1475 			// attach, the owner P may schedule the
   1476 			// worker, so this must be done after the G is
   1477 			// stopped.
   1478 			if park.attach != 0 {
   1479 				p := park.attach.ptr()
   1480 				park.attach.set(nil)
   1481 				// cas the worker because we may be
   1482 				// racing with a new worker starting
   1483 				// on this P.
   1484 				if !p.gcBgMarkWorker.cas(0, guintptr(unsafe.Pointer(g))) {
   1485 					// The P got a new worker.
   1486 					// Exit this worker.
   1487 					return false
   1488 				}
   1489 			}
   1490 			return true
   1491 		}, unsafe.Pointer(park), "GC worker (idle)", traceEvGoBlock, 0)
   1492 
   1493 		// Loop until the P dies and disassociates this
   1494 		// worker (the P may later be reused, in which case
    1495 		// it will get a new worker), or until we fail to associate.
   1496 		if _p_.gcBgMarkWorker.ptr() != gp {
   1497 			break
   1498 		}
   1499 
   1500 		// Disable preemption so we can use the gcw. If the
   1501 		// scheduler wants to preempt us, we'll stop draining,
   1502 		// dispose the gcw, and then preempt.
   1503 		park.m.set(acquirem())
   1504 
   1505 		if gcBlackenEnabled == 0 {
   1506 			throw("gcBgMarkWorker: blackening not enabled")
   1507 		}
   1508 
   1509 		startTime := nanotime()
   1510 
   1511 		decnwait := atomic.Xadd(&work.nwait, -1)
   1512 		if decnwait == work.nproc {
   1513 			println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
   1514 			throw("work.nwait was > work.nproc")
   1515 		}
   1516 
   1517 		systemstack(func() {
   1518 			// Mark our goroutine preemptible so its stack
   1519 			// can be scanned. This lets two mark workers
   1520 			// scan each other (otherwise, they would
   1521 			// deadlock). We must not modify anything on
   1522 			// the G stack. However, stack shrinking is
   1523 			// disabled for mark workers, so it is safe to
   1524 			// read from the G stack.
   1525 			casgstatus(gp, _Grunning, _Gwaiting)
   1526 			switch _p_.gcMarkWorkerMode {
   1527 			default:
   1528 				throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
   1529 			case gcMarkWorkerDedicatedMode:
   1530 				gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
   1531 			case gcMarkWorkerFractionalMode:
   1532 				gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
   1533 			case gcMarkWorkerIdleMode:
   1534 				gcDrain(&_p_.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
   1535 			}
   1536 			casgstatus(gp, _Gwaiting, _Grunning)
   1537 		})
   1538 
   1539 		// If we are nearing the end of mark, dispose
   1540 		// of the cache promptly. We must do this
   1541 		// before signaling that we're no longer
   1542 		// working so that other workers can't observe
   1543 		// no workers and no work while we have this
   1544 		// cached, and before we compute done.
   1545 		if gcBlackenPromptly {
   1546 			_p_.gcw.dispose()
   1547 		}
   1548 
   1549 		// Account for time.
   1550 		duration := nanotime() - startTime
   1551 		switch _p_.gcMarkWorkerMode {
   1552 		case gcMarkWorkerDedicatedMode:
   1553 			atomic.Xaddint64(&gcController.dedicatedMarkTime, duration)
   1554 			atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
   1555 		case gcMarkWorkerFractionalMode:
   1556 			atomic.Xaddint64(&gcController.fractionalMarkTime, duration)
   1557 			atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
   1558 		case gcMarkWorkerIdleMode:
   1559 			atomic.Xaddint64(&gcController.idleMarkTime, duration)
   1560 		}
   1561 
   1562 		// Was this the last worker and did we run out
   1563 		// of work?
   1564 		incnwait := atomic.Xadd(&work.nwait, +1)
   1565 		if incnwait > work.nproc {
   1566 			println("runtime: p.gcMarkWorkerMode=", _p_.gcMarkWorkerMode,
   1567 				"work.nwait=", incnwait, "work.nproc=", work.nproc)
   1568 			throw("work.nwait > work.nproc")
   1569 		}
   1570 
   1571 		// If this worker reached a background mark completion
   1572 		// point, signal the main GC goroutine.
   1573 		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
   1574 			// Make this G preemptible and disassociate it
   1575 			// as the worker for this P so
   1576 			// findRunnableGCWorker doesn't try to
   1577 			// schedule it.
   1578 			_p_.gcBgMarkWorker.set(nil)
   1579 			releasem(park.m.ptr())
   1580 
   1581 			gcMarkDone()
   1582 
   1583 			// Disable preemption and prepare to reattach
   1584 			// to the P.
   1585 			//
   1586 			// We may be running on a different P at this
   1587 			// point, so we can't reattach until this G is
   1588 			// parked.
   1589 			park.m.set(acquirem())
   1590 			park.attach.set(_p_)
   1591 		}
   1592 	}
   1593 }
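
         // In summary, each background mark worker cycles through: park in
         // gopark (attaching itself to its P as it parks), get scheduled by
         // gcController.findRunnable, drain work in the P's chosen mode,
         // flush its time accounting, possibly call gcMarkDone if it was the
         // last worker with no work remaining, and park again.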
   1594 
   1595 // gcMarkWorkAvailable returns true if executing a mark worker
   1596 // on p is potentially useful. p may be nil, in which case it only
   1597 // checks the global sources of work.
   1598 func gcMarkWorkAvailable(p *p) bool {
   1599 	if p != nil && !p.gcw.empty() {
   1600 		return true
   1601 	}
   1602 	if atomic.Load64(&work.full) != 0 {
   1603 		return true // global work available
   1604 	}
   1605 	if work.markrootNext < work.markrootJobs {
   1606 		return true // root scan work available
   1607 	}
   1608 	return false
   1609 }
   1610 
    1611 // gcMark runs the mark (or, for concurrent GC, mark termination) phase.
   1612 // All gcWork caches must be empty.
   1613 // STW is in effect at this point.
   1614 //TODO go:nowritebarrier
   1615 func gcMark(start_time int64) {
   1616 	if debug.allocfreetrace > 0 {
   1617 		tracegc()
   1618 	}
   1619 
   1620 	if gcphase != _GCmarktermination {
   1621 		throw("in gcMark expecting to see gcphase as _GCmarktermination")
   1622 	}
   1623 	work.tstart = start_time
   1624 
   1625 	// Queue root marking jobs.
   1626 	gcMarkRootPrepare()
   1627 
   1628 	work.nwait = 0
   1629 	work.ndone = 0
   1630 	work.nproc = uint32(gcprocs())
   1631 
   1632 	if debug.gcrescanstacks == 0 && work.full == 0 && work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots+work.nRescanRoots == 0 {
   1633 		// There's no work on the work queue and no root jobs
   1634 		// that can produce work, so don't bother entering the
   1635 		// getfull() barrier.
   1636 		//
   1637 		// With the hybrid barrier enabled, this will be the
   1638 		// situation the vast majority of the time after
   1639 		// concurrent mark. However, we still need a fallback
   1640 		// for STW GC and because there are some known races
   1641 		// that occasionally leave work around for mark
   1642 		// termination.
   1643 		//
   1644 		// We're still hedging our bets here: if we do
   1645 		// accidentally produce some work, we'll still process
   1646 		// it, just not necessarily in parallel.
   1647 		//
   1648 		// TODO(austin): When we eliminate
   1649 		// debug.gcrescanstacks: fix the races, and remove
   1650 		// work draining from mark termination so we don't
   1651 		// need the fallback path.
   1652 		work.helperDrainBlock = false
   1653 	} else {
   1654 		work.helperDrainBlock = true
   1655 	}
   1656 
   1657 	if trace.enabled {
   1658 		traceGCScanStart()
   1659 	}
   1660 
   1661 	if work.nproc > 1 {
   1662 		noteclear(&work.alldone)
   1663 		helpgc(int32(work.nproc))
   1664 	}
   1665 
   1666 	gchelperstart()
   1667 
   1668 	gcw := &getg().m.p.ptr().gcw
   1669 	if work.helperDrainBlock {
   1670 		gcDrain(gcw, gcDrainBlock)
   1671 	} else {
   1672 		gcDrain(gcw, gcDrainNoBlock)
   1673 	}
   1674 	gcw.dispose()
   1675 
   1676 	if debug.gccheckmark > 0 {
   1677 		// This is expensive when there's a large number of
   1678 		// Gs, so only do it if checkmark is also enabled.
   1679 		gcMarkRootCheck()
   1680 	}
   1681 	if work.full != 0 {
   1682 		throw("work.full != 0")
   1683 	}
   1684 
   1685 	if work.nproc > 1 {
   1686 		notesleep(&work.alldone)
   1687 	}
   1688 
   1689 	// Record that at least one root marking pass has completed.
   1690 	work.markrootDone = true
   1691 
   1692 	// Double-check that all gcWork caches are empty. This should
   1693 	// be ensured by mark 2 before we enter mark termination.
   1694 	for i := 0; i < int(gomaxprocs); i++ {
   1695 		gcw := &allp[i].gcw
   1696 		if !gcw.empty() {
   1697 			throw("P has cached GC work at end of mark termination")
   1698 		}
   1699 		if gcw.scanWork != 0 || gcw.bytesMarked != 0 {
   1700 			throw("P has unflushed stats at end of mark termination")
   1701 		}
   1702 	}
   1703 
   1704 	if trace.enabled {
   1705 		traceGCScanDone()
   1706 	}
   1707 
   1708 	cachestats()
   1709 
   1710 	// Update the marked heap stat.
   1711 	memstats.heap_marked = work.bytesMarked
   1712 
   1713 	// Trigger the next GC cycle when the allocated heap has grown
   1714 	// by triggerRatio over the marked heap size. Assume that
   1715 	// we're in steady state, so the marked heap size is the
   1716 	// same now as it was at the beginning of the GC cycle.
   1717 	memstats.gc_trigger = uint64(float64(memstats.heap_marked) * (1 + gcController.triggerRatio))
   1718 	if memstats.gc_trigger < heapminimum {
   1719 		memstats.gc_trigger = heapminimum
   1720 	}
   1721 	if int64(memstats.gc_trigger) < 0 {
   1722 		print("next_gc=", memstats.next_gc, " bytesMarked=", work.bytesMarked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "\n")
   1723 		throw("gc_trigger underflow")
   1724 	}
   1725 
   1726 	// Update other GC heap size stats. This must happen after
   1727 	// cachestats (which flushes local statistics to these) and
   1728 	// flushallmcaches (which modifies heap_live).
   1729 	memstats.heap_live = work.bytesMarked
   1730 	memstats.heap_scan = uint64(gcController.scanWork)
   1731 
   1732 	minTrigger := memstats.heap_live + sweepMinHeapDistance*uint64(gcpercent)/100
   1733 	if memstats.gc_trigger < minTrigger {
   1734 		// The allocated heap is already past the trigger.
   1735 		// This can happen if the triggerRatio is very low and
   1736 		// the marked heap is less than the live heap size.
   1737 		//
   1738 		// Concurrent sweep happens in the heap growth from
   1739 		// heap_live to gc_trigger, so bump gc_trigger up to ensure
   1740 		// that concurrent sweep has some heap growth in which
   1741 		// to perform sweeping before we start the next GC
   1742 		// cycle.
   1743 		memstats.gc_trigger = minTrigger
   1744 	}
   1745 
   1746 	// The next GC cycle should finish before the allocated heap
   1747 	// has grown by GOGC/100.
   1748 	memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
   1749 	if gcpercent < 0 {
   1750 		memstats.next_gc = ^uint64(0)
   1751 	}
   1752 	if memstats.next_gc < memstats.gc_trigger {
   1753 		memstats.next_gc = memstats.gc_trigger
   1754 	}
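
         	// As a worked example of the arithmetic above (illustrative
         	// numbers): with heap_marked = 100MB and gcpercent = 100, next_gc
         	// is 100MB + 100MB*100/100 = 200MB. If gcController.triggerRatio
         	// were 0.7, gc_trigger would be 100MB * 1.7 = 170MB: the next
         	// cycle would start once the allocated heap reached 170MB, aiming
         	// to finish before it reaches the 200MB goal.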
   1755 
   1756 	if trace.enabled {
   1757 		traceHeapAlloc()
   1758 		traceNextGC()
   1759 	}
   1760 }
   1761 
   1762 func gcSweep(mode gcMode) {
   1763 	if gcphase != _GCoff {
   1764 		throw("gcSweep being done but phase is not GCoff")
   1765 	}
   1766 
   1767 	lock(&mheap_.lock)
   1768 	mheap_.sweepgen += 2
   1769 	mheap_.sweepdone = 0
   1770 	if mheap_.sweepSpans[mheap_.sweepgen/2%2].index != 0 {
   1771 		// We should have drained this list during the last
   1772 		// sweep phase. We certainly need to start this phase
   1773 		// with an empty swept list.
   1774 		throw("non-empty swept list")
   1775 	}
   1776 	unlock(&mheap_.lock)
   1777 
   1778 	if !_ConcurrentSweep || mode == gcForceBlockMode {
   1779 		// Special case synchronous sweep.
   1780 		// Record that no proportional sweeping has to happen.
   1781 		lock(&mheap_.lock)
   1782 		mheap_.sweepPagesPerByte = 0
   1783 		mheap_.pagesSwept = 0
   1784 		unlock(&mheap_.lock)
   1785 		// Sweep all spans eagerly.
   1786 		for sweepone() != ^uintptr(0) {
   1787 			sweep.npausesweep++
   1788 		}
   1789 		// Do an additional mProf_GC, because all 'free' events are now real as well.
   1790 		mProf_GC()
   1791 		mProf_GC()
   1792 		return
   1793 	}
   1794 
   1795 	// Concurrent sweep needs to sweep all of the in-use pages by
   1796 	// the time the allocated heap reaches the GC trigger. Compute
   1797 	// the ratio of in-use pages to sweep per byte allocated.
   1798 	heapDistance := int64(memstats.gc_trigger) - int64(memstats.heap_live)
   1799 	// Add a little margin so rounding errors and concurrent
   1800 	// sweep are less likely to leave pages unswept when GC starts.
   1801 	heapDistance -= 1024 * 1024
   1802 	if heapDistance < _PageSize {
   1803 		// Avoid setting the sweep ratio extremely high
   1804 		heapDistance = _PageSize
   1805 	}
   1806 	lock(&mheap_.lock)
   1807 	mheap_.sweepPagesPerByte = float64(mheap_.pagesInUse) / float64(heapDistance)
   1808 	mheap_.pagesSwept = 0
   1809 	mheap_.spanBytesAlloc = 0
   1810 	unlock(&mheap_.lock)
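
         	// Illustrative numbers for the ratio above: if gc_trigger exceeds
         	// heap_live by 71MB, the 1MB margin leaves heapDistance = 70MB.
         	// With pagesInUse = 25600 (200MB of 8KB pages), sweepPagesPerByte
         	// ≈ 25600 / (70<<20) ≈ 0.00035, so each 1MB of allocation obliges
         	// the allocator to sweep roughly 366 pages.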
   1811 
   1812 	// Background sweep.
   1813 	lock(&sweep.lock)
   1814 	if sweep.parked {
   1815 		sweep.parked = false
   1816 		ready(sweep.g, 0, true)
   1817 	}
   1818 	unlock(&sweep.lock)
   1819 }
   1820 
   1821 // gcResetMarkState resets global state prior to marking (concurrent
   1822 // or STW) and resets the stack scan state of all Gs.
   1823 //
   1824 // This is safe to do without the world stopped because any Gs created
   1825 // during or after this will start out in the reset state.
   1826 func gcResetMarkState() {
   1827 	// This may be called during a concurrent phase, so make sure
   1828 	// allgs doesn't change.
   1829 	if !(gcphase == _GCoff || gcphase == _GCmarktermination) {
   1830 		// Accessing gcRescan is unsafe.
   1831 		throw("bad GC phase")
   1832 	}
   1833 	lock(&allglock)
   1834 	for _, gp := range allgs {
   1835 		gp.gcscandone = false  // set to true in gcphasework
   1836 		gp.gcscanvalid = false // stack has not been scanned
   1837 		gp.gcRescan = -1
   1838 		gp.gcAssistBytes = 0
   1839 	}
   1840 	unlock(&allglock)
   1841 
   1842 	// Clear rescan list.
   1843 	work.rescan.list = work.rescan.list[:0]
   1844 
   1845 	work.bytesMarked = 0
   1846 	work.initialHeapLive = memstats.heap_live
   1847 	work.markrootDone = false
   1848 }
   1849 
   1850 // Hooks for other packages
   1851 
   1852 var poolcleanup func()
   1853 
   1854 //go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
   1855 func sync_runtime_registerPoolCleanup(f func()) {
   1856 	poolcleanup = f
   1857 }
   1858 
   1859 func clearpools() {
   1860 	// clear sync.Pools
   1861 	if poolcleanup != nil {
   1862 		poolcleanup()
   1863 	}
   1864 
   1865 	// Clear central sudog cache.
    1866 	// Leave per-P caches alone; they have strictly bounded size.
   1867 	// Disconnect cached list before dropping it on the floor,
   1868 	// so that a dangling ref to one entry does not pin all of them.
   1869 	lock(&sched.sudoglock)
   1870 	var sg, sgnext *sudog
   1871 	for sg = sched.sudogcache; sg != nil; sg = sgnext {
   1872 		sgnext = sg.next
   1873 		sg.next = nil
   1874 	}
   1875 	sched.sudogcache = nil
   1876 	unlock(&sched.sudoglock)
   1877 
   1878 	// Clear central defer pools.
   1879 	// Leave per-P pools alone, they have strictly bounded size.
    1880 	// Leave per-P pools alone; they have strictly bounded size.
   1881 	for i := range sched.deferpool {
   1882 		// disconnect cached list before dropping it on the floor,
   1883 		// so that a dangling ref to one entry does not pin all of them.
   1884 		var d, dlink *_defer
   1885 		for d = sched.deferpool[i]; d != nil; d = dlink {
   1886 			dlink = d.link
   1887 			d.link = nil
   1888 		}
   1889 		sched.deferpool[i] = nil
   1890 	}
   1891 	unlock(&sched.deferlock)
   1892 }
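
         // The disconnect-before-drop idiom used twice above generalizes: when
         // outside references to individual nodes may exist, nil out each link
         // so a dangling reference to one node cannot keep the whole chain
         // alive. A standalone sketch (hypothetical node type, for
         // illustration only):
         //
         //	for n := head; n != nil; {
         //		next := n.next
         //		n.next = nil // unlink so n alone stays reachable
         //		n = next
         //	}
         //	head = nil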
   1893 
   1894 // Timing
   1895 
   1896 //go:nowritebarrier
   1897 func gchelper() {
   1898 	_g_ := getg()
   1899 	_g_.m.traceback = 2
   1900 	gchelperstart()
   1901 
   1902 	if trace.enabled {
   1903 		traceGCScanStart()
   1904 	}
   1905 
   1906 	// Parallel mark over GC roots and heap
   1907 	if gcphase == _GCmarktermination {
   1908 		gcw := &_g_.m.p.ptr().gcw
   1909 		if work.helperDrainBlock {
   1910 			gcDrain(gcw, gcDrainBlock) // blocks in getfull
   1911 		} else {
   1912 			gcDrain(gcw, gcDrainNoBlock)
   1913 		}
   1914 		gcw.dispose()
   1915 	}
   1916 
   1917 	if trace.enabled {
   1918 		traceGCScanDone()
   1919 	}
   1920 
   1921 	nproc := work.nproc // work.nproc can change right after we increment work.ndone
   1922 	if atomic.Xadd(&work.ndone, +1) == nproc-1 {
   1923 		notewakeup(&work.alldone)
   1924 	}
   1925 	_g_.m.traceback = 0
   1926 }
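
         // gcMark runs on the initiating M and drains work itself, so only
         // the nproc-1 Ms started by helpgc pass through gchelper; the last
         // helper to finish (ndone reaching nproc-1) wakes work.alldone, on
         // which gcMark sleeps.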
   1927 
   1928 func gchelperstart() {
   1929 	_g_ := getg()
   1930 
   1931 	if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
   1932 		throw("gchelperstart: bad m->helpgc")
   1933 	}
   1934 	if _g_ != _g_.m.g0 {
   1935 		throw("gchelper not running on g0 stack")
   1936 	}
   1937 }
   1938 
   1939 // itoaDiv formats val/(10**dec) into buf.
   1940 func itoaDiv(buf []byte, val uint64, dec int) []byte {
   1941 	i := len(buf) - 1
   1942 	idec := i - dec
   1943 	for val >= 10 || i >= idec {
   1944 		buf[i] = byte(val%10 + '0')
   1945 		i--
   1946 		if i == idec {
   1947 			buf[i] = '.'
   1948 			i--
   1949 		}
   1950 		val /= 10
   1951 	}
   1952 	buf[i] = byte(val + '0')
   1953 	return buf[i:]
   1954 }
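
         // For example, with a sufficiently large buf, itoaDiv(buf[:], 12345, 3)
         // renders "12.345", and itoaDiv(buf[:], 42, 0) renders "42" (dec == 0
         // produces no decimal point).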
   1955 
   1956 // fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
   1957 func fmtNSAsMS(buf []byte, ns uint64) []byte {
   1958 	if ns >= 10e6 {
   1959 		// Format as whole milliseconds.
   1960 		return itoaDiv(buf, ns/1e6, 0)
   1961 	}
   1962 	// Format two digits of precision, with at most three decimal places.
   1963 	x := ns / 1e3
   1964 	if x == 0 {
   1965 		buf[0] = '0'
   1966 		return buf[:1]
   1967 	}
   1968 	dec := 3
   1969 	for x >= 100 {
   1970 		x /= 10
   1971 		dec--
   1972 	}
   1973 	return itoaDiv(buf, x, dec)
   1974 }
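
         // For example, fmtNSAsMS(buf[:], 1234567) yields "1.2" (1.234567ms
         // with two digits of precision), while fmtNSAsMS(buf[:], 12345678)
         // takes the whole-millisecond path and yields "12".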
   1975