// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

const (
	_Debugwbufs  = false   // if true, check wbuf consistency
	_WorkbufSize = 1 * 256 // in bytes; if small, wbufs are passed to the GC in a timely fashion
)

// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
// objects.  A grey object is one that is marked and on a work
// queue.  A black object is marked and not on a work queue.
//
// Write barriers, root discovery, stack scanning, and object scanning
// produce pointers to grey objects.  Scanning consumes pointers to
// grey objects, thus blackening them, and then scans them,
// potentially producing new pointers to grey objects.

// A wbufptr holds a workbuf*, but protects it from write barriers.
// workbufs never live on the heap, so write barriers are unnecessary.
// Write barriers on workbuf pointers may also be dangerous in the GC.
type wbufptr uintptr

func wbufptrOf(w *workbuf) wbufptr {
	return wbufptr(unsafe.Pointer(w))
}

func (wp wbufptr) ptr() *workbuf {
	return (*workbuf)(unsafe.Pointer(wp))
}
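
// A hedged usage sketch (not from the original source): store the buffer
// pointer as a wbufptr so the compiler emits no write barrier for the
// store, and convert back with ptr() only when dereferencing:
//
//	wp := wbufptrOf(wbuf) // store side: no write barrier emitted
//	wbuf = wp.ptr()       // load side: recover the *workbuf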

// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
// A gcWork can be used on the stack as follows:
//
//     var gcw gcWork
//     disable preemption
//     .. call gcw.put() to produce and gcw.get() to consume ..
//     gcw.dispose()
//     enable preemption
//
// Or from the per-P gcWork cache:
//
//     (preemption must be disabled)
//     gcw := &getg().m.p.ptr().gcw
//     .. call gcw.put() to produce and gcw.get() to consume ..
//     if gcphase == _GCmarktermination {
//         gcw.dispose()
//     }
//
// It's important that any use of gcWork during the mark phase prevent
// the garbage collector from transitioning to mark termination since
// gcWork may locally hold GC work buffers. This can be done by
// disabling preemption (systemstack or acquirem).
type gcWork struct {
	// Invariant: wbuf is never full or empty
	wbuf wbufptr

	// Bytes marked (blackened) on this gcWork. This is aggregated
	// into work.bytesMarked by dispose.
	bytesMarked uint64

	// Scan work performed on this gcWork. This is aggregated into
	// gcController by dispose.
	scanWork int64
}
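
// As a concrete version of the stack-based pattern above (a sketch, not
// from the original source), preemption can be disabled with acquirem:
//
//	mp := acquirem() // disable preemption for the lifetime of gcw
//	var gcw gcWork
//	// ... gcw.put to produce, gcw.tryGet/gcw.get to consume ...
//	gcw.dispose()
//	releasem(mp) // re-enable preemption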

// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object.
//go:nowritebarrier
func (ww *gcWork) put(obj uintptr) {
	w := (*gcWork)(noescape(unsafe.Pointer(ww))) // TODO: remove when escape analysis is fixed

	wbuf := w.wbuf.ptr()
	if wbuf == nil {
		wbuf = getpartialorempty(42)
		w.wbuf = wbufptrOf(wbuf)
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++

	if wbuf.nobj == len(wbuf.obj) {
		putfull(wbuf, 50)
		w.wbuf = 0
	}
}

// tryGet dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0.  Note that there may still be pointers in
// other gcWork instances or other caches.
//go:nowritebarrier
func (ww *gcWork) tryGet() uintptr {
	w := (*gcWork)(noescape(unsafe.Pointer(ww))) // TODO: remove when escape analysis is fixed

	wbuf := w.wbuf.ptr()
	if wbuf == nil {
		wbuf = trygetfull(74)
		if wbuf == nil {
			return 0
		}
		w.wbuf = wbufptrOf(wbuf)
	}

	wbuf.nobj--
	obj := wbuf.obj[wbuf.nobj]

	if wbuf.nobj == 0 {
		putempty(wbuf, 86)
		w.wbuf = 0
	}

	return obj
}
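
// A typical consumer drains a gcWork with a loop like the following
// (a sketch, not from the original source; the blackening step stands
// in for whatever scanning routine processes the object):
//
//	for {
//		obj := gcw.tryGet()
//		if obj == 0 {
//			break // no local or global work remains
//		}
//		// blacken obj, which may gcw.put new grey pointers
//	}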

// get dequeues a pointer for the garbage collector to trace, blocking
// if necessary to ensure all pointers from all queues and caches have
// been retrieved.  get returns 0 if there are no pointers remaining.
//go:nowritebarrier
func (ww *gcWork) get() uintptr {
	w := (*gcWork)(noescape(unsafe.Pointer(ww))) // TODO: remove when escape analysis is fixed

	wbuf := w.wbuf.ptr()
	if wbuf == nil {
		wbuf = getfull(103)
		if wbuf == nil {
			return 0
		}
		wbuf.checknonempty()
		w.wbuf = wbufptrOf(wbuf)
	}

	// TODO: This might be a good place to add prefetch code

	wbuf.nobj--
	obj := wbuf.obj[wbuf.nobj]

	if wbuf.nobj == 0 {
		putempty(wbuf, 115)
		w.wbuf = 0
	}

	return obj
}

// dispose returns any cached pointers to the global queue.
// The buffers are put on the full queue so that write barriers
// will not simply reacquire them before the GC can inspect them.
// This helps reduce the mutator's ability to hide pointers during
// the concurrent mark phase.
//
//go:nowritebarrier
func (w *gcWork) dispose() {
	if wbuf := w.wbuf; wbuf != 0 {
		if wbuf.ptr().nobj == 0 {
			throw("dispose: workbuf is empty")
		}
		putfull(wbuf.ptr(), 166)
		w.wbuf = 0
	}
	if w.bytesMarked != 0 {
		// dispose happens relatively infrequently. If this
		// atomic becomes a problem, we should first try to
		// dispose less and if necessary aggregate in a per-P
		// counter.
		xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
	if w.scanWork != 0 {
		xaddint64(&gcController.scanWork, w.scanWork)
		w.scanWork = 0
	}
}

// balance moves some work that's cached in this gcWork back on the
// global queue.
//go:nowritebarrier
func (w *gcWork) balance() {
	if wbuf := w.wbuf; wbuf != 0 && wbuf.ptr().nobj > 4 {
		w.wbuf = wbufptrOf(handoff(wbuf.ptr()))
	}
}

// empty returns true if w has no mark work available.
//go:nowritebarrier
func (w *gcWork) empty() bool {
	wbuf := w.wbuf
	return wbuf == 0 || wbuf.ptr().nobj == 0
}

// Internally, the GC work pool is kept in arrays in work buffers.
// The gcWork interface caches a work buffer until full (or empty) to
// avoid contending on the global work buffer lists.

type workbufhdr struct {
	node  lfnode // must be first
	nobj  int
	inuse bool   // This workbuf is in use by some goroutine and is not on the work.empty/partial/full queues.
	log   [4]int // line numbers forming a history of ownership changes to workbuf
}

type workbuf struct {
	workbufhdr
	// account for the above fields
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / ptrSize]uintptr
}
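
// For instance, on a 64-bit system (ptrSize == 8), if workbufhdr happens
// to occupy 64 bytes after alignment, a 256-byte workbuf holds
// (256-64)/8 == 24 object pointers. The exact header size depends on the
// platform's field alignment, so these numbers are illustrative only.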

// workbuf factory routines. These funcs are used to manage the
// workbufs.
// If the GC asks for some work, these are the only routines that
// make partially full wbufs available to the GC.
// Each of the gets and puts also takes a distinct integer that is used
// to record a brief history of changes to ownership of the workbuf.
// The convention is to use a unique line number, but any encoding
// is permissible. For example, if you want to pass in two pieces of
// information, you could simply add lineno1*100000+lineno2.
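//
// For instance (illustrative numbers only), a get at line 84 that is
// forwarded through a helper identified by prefix 807 could be logged as
//
//	b.logget(807*100000 + 84) // == 80700084
//
// This is the scheme getpartialorempty uses below when it forwards to
// getempty with entry + 80700000.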

// logget records the past few values of entry to aid in debugging.
// logget checks the buffer b is not currently in use.
func (b *workbuf) logget(entry int) {
	if !_Debugwbufs {
		return
	}
	if b.inuse {
		println("runtime: logget fails log entry=", entry,
			"b.log[0]=", b.log[0], "b.log[1]=", b.log[1],
			"b.log[2]=", b.log[2], "b.log[3]=", b.log[3])
		throw("logget: get not legal")
	}
	b.inuse = true
	copy(b.log[1:], b.log[:])
	b.log[0] = entry
}

// logput records the past few values of entry to aid in debugging.
// logput checks the buffer b is currently in use.
func (b *workbuf) logput(entry int) {
	if !_Debugwbufs {
		return
	}
	if !b.inuse {
		println("runtime: logput fails log entry=", entry,
			"b.log[0]=", b.log[0], "b.log[1]=", b.log[1],
			"b.log[2]=", b.log[2], "b.log[3]=", b.log[3])
		throw("logput: put not legal")
	}
	b.inuse = false
	copy(b.log[1:], b.log[:])
	b.log[0] = entry
}

func (b *workbuf) checknonempty() {
	if b.nobj == 0 {
		println("runtime: nonempty check fails",
			"b.log[0]=", b.log[0], "b.log[1]=", b.log[1],
			"b.log[2]=", b.log[2], "b.log[3]=", b.log[3])
		throw("workbuf is empty")
	}
}

func (b *workbuf) checkempty() {
	if b.nobj != 0 {
		println("runtime: empty check fails",
			"b.log[0]=", b.log[0], "b.log[1]=", b.log[1],
			"b.log[2]=", b.log[2], "b.log[3]=", b.log[3])
		throw("workbuf is not empty")
	}
}

// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
// entry is used to record a brief history of ownership.
//go:nowritebarrier
func getempty(entry int) *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(lfstackpop(&work.empty))
		if b != nil {
			b.checkempty()
		}
	}
	if b == nil {
		b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
	}
	b.logget(entry)
	return b
}

// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b. The lfstackpush relinquishes ownership.
//go:nowritebarrier
func putempty(b *workbuf, entry int) {
	b.checkempty()
	b.logput(entry)
	lfstackpush(&work.empty, &b.node)
}

// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//go:nowritebarrier
func putfull(b *workbuf, entry int) {
	b.checknonempty()
	b.logput(entry)
	lfstackpush(&work.full, &b.node)
}

// getpartialorempty tries to return a partially filled buffer
// and, if none are available, returns an empty one.
// entry is used to provide a brief history of ownership,
// using entry + xxx00000 to encode two line numbers
// in the call chain.
//go:nowritebarrier
func getpartialorempty(entry int) *workbuf {
	b := (*workbuf)(lfstackpop(&work.partial))
	if b != nil {
		b.logget(entry)
		return b
	}
	// Let getempty do the logget check but
	// use the entry to encode that it passed
	// through this routine.
	b = getempty(entry + 80700000)
	return b
}

// putpartial puts empty buffers on the work.empty queue,
// full buffers on the work.full queue and
// others on the work.partial queue.
// entry is used to provide a brief history of ownership,
// using entry + xxx00000 to encode two call-chain
// line numbers.
//go:nowritebarrier
func putpartial(b *workbuf, entry int) {
	if b.nobj == 0 {
		putempty(b, entry+81500000)
	} else if b.nobj < len(b.obj) {
		b.logput(entry)
		lfstackpush(&work.partial, &b.node)
	} else if b.nobj == len(b.obj) {
		b.logput(entry)
		lfstackpush(&work.full, &b.node)
	} else {
		throw("putpartial: bad Workbuf b.nobj")
	}
}

// trygetfull tries to get a full or partially filled workbuf.
// If one is not immediately available, it returns nil.
//go:nowritebarrier
func trygetfull(entry int) *workbuf {
	b := (*workbuf)(lfstackpop(&work.full))
	if b == nil {
		b = (*workbuf)(lfstackpop(&work.partial))
	}
	if b != nil {
		b.logget(entry)
		b.checknonempty()
		return b
	}
	return b
}

// getfull gets a full work buffer off the work.full list or a
// partially filled one off the work.partial list. If nothing is
// available, it waits until all the other GC helpers have finished
// and then returns nil.
// getfull acts as a barrier for work.nproc helpers. As long as one
// gchelper is actively marking objects it
// may create a workbuf that the other helpers can work on.
// The for loop either exits when a work buffer is found
// or when _all_ of the work.nproc GC helpers are in the loop
// looking for work and thus not capable of creating new work.
// This is in fact the termination condition for the STW mark
// phase.
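//
// For example, with work.nproc == 4: each helper that finds no buffer
// increments work.nwait before spinning. Once all four are waiting,
// work.nwait == work.nproc, no helper can produce new work, and every
// helper's nwait check below returns nil, ending the phase.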
//go:nowritebarrier
func getfull(entry int) *workbuf {
	b := (*workbuf)(lfstackpop(&work.full))
	if b != nil {
		b.logget(entry)
		b.checknonempty()
		return b
	}
	b = (*workbuf)(lfstackpop(&work.partial))
	if b != nil {
		b.logget(entry)
		return b
	}

	incnwait := xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}
	for i := 0; ; i++ {
		if work.full != 0 || work.partial != 0 {
			decnwait := xadd(&work.nwait, -1)
			if decnwait == work.nproc {
				println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
				throw("work.nwait > work.nproc")
			}
			b = (*workbuf)(lfstackpop(&work.full))
			if b == nil {
				b = (*workbuf)(lfstackpop(&work.partial))
			}
			if b != nil {
				b.logget(entry)
				b.checknonempty()
				return b
			}
			incnwait := xadd(&work.nwait, +1)
			if incnwait > work.nproc {
				println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
				throw("work.nwait > work.nproc")
			}
		}
		if work.nwait == work.nproc {
			return nil
		}
		_g_ := getg()
		if i < 10 {
			_g_.m.gcstats.nprocyield++
			procyield(20)
		} else if i < 20 {
			_g_.m.gcstats.nosyield++
			osyield()
		} else {
			_g_.m.gcstats.nsleep++
			usleep(100)
		}
	}
}

//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
	// Make new buffer with half of b's pointers.
	b1 := getempty(915)
	n := b.nobj / 2
	b.nobj -= n
	b1.nobj = n
	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
	_g_ := getg()
	_g_.m.gcstats.nhandoff++
	_g_.m.gcstats.nhandoffcnt += uint64(n)

	// Put b on full list - let first half of b get stolen.
	putfull(b, 942)
	return b1
}
    459