// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: finalizers and block profiling.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// finblock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type finblock struct {
	alllink *finblock
	next    *finblock
	cnt     uint32
	_       int32
	fin     [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}

var finlock mutex  // protects the following variables
var fing *g        // goroutine that runs finalizers
var finq *finblock // list of finalizers that are to be executed
var finc *finblock // cache of free blocks
var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
var fingwait bool
var fingwake bool
var allfin *finblock // list of all blocks

// NOTE: Layout known to queuefinalizer.
type finalizer struct {
	fn   *funcval       // function to call (may be a heap pointer)
	arg  unsafe.Pointer // ptr to object (may be a heap pointer)
	nret uintptr        // bytes of return values from fn
	fint *_type         // type of first argument of fn
	ot   *ptrtype       // type of ptr to object (may be a heap pointer)
}

var finalizer1 = [...]byte{
	// Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
	// Each byte describes 8 words.
	// Need 8 Finalizers described by 5 bytes before pattern repeats:
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	// aka
	//
	//	ptr ptr INT ptr ptr ptr ptr INT
	//	ptr ptr ptr ptr INT ptr ptr ptr
	//	ptr INT ptr ptr ptr ptr INT ptr
	//	ptr ptr ptr INT ptr ptr ptr ptr
	//	INT ptr ptr ptr ptr INT ptr ptr
	//
	// Assumptions about Finalizer layout checked below.
	1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
	1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
	1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
	1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
	0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
}
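// For example, the first byte above is 0x7b: bits 0, 1, 3, 4, 5, and 6 are
// set, marking fn, arg, fint, and ot of finalizer[0] plus fn and arg of
// finalizer[1] as pointer words, while bits 2 and 7 (the two nret words)
// are left clear as scalars.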

func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
	lock(&finlock)
	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
		if finc == nil {
			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
			finc.alllink = allfin
			allfin = finc
			if finptrmask[0] == 0 {
				// Build pointer mask for Finalizer array in block.
				// Check assumptions made in finalizer1 array above.
				if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.fn) != 0 ||
					unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
					throw("finalizer out of sync")
				}
				for i := range finptrmask {
					finptrmask[i] = finalizer1[i%len(finalizer1)]
				}
			}
		}
		block := finc
		finc = block.next
		block.next = finq
		finq = block
	}
	f := &finq.fin[finq.cnt]
	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
	f.fn = fn
	f.nret = nret
	f.fint = fint
	f.ot = ot
	f.arg = p
	fingwake = true
	unlock(&finlock)
}

//go:nowritebarrier
func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
	for fb := allfin; fb != nil; fb = fb.alllink {
		for i := uint32(0); i < fb.cnt; i++ {
			f := &fb.fin[i]
			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
		}
	}
}

func wakefing() *g {
	var res *g
	lock(&finlock)
	if fingwait && fingwake {
		fingwait = false
		fingwake = false
		res = fing
	}
	unlock(&finlock)
	return res
}

var (
	fingCreate  uint32
	fingRunning bool
)

func createfing() {
	// start the finalizer goroutine exactly once
	if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
		go runfinq()
	}
}

// This is the goroutine that runs all of the finalizers
func runfinq() {
	var (
		frame    unsafe.Pointer
		framecap uintptr
	)

	for {
		lock(&finlock)
		fb := finq
		finq = nil
		if fb == nil {
			gp := getg()
			fing = gp
			fingwait = true
			goparkunlock(&finlock, "finalizer wait", traceEvGoBlock, 1)
			continue
		}
		unlock(&finlock)
		if raceenabled {
			racefingo()
		}
		for fb != nil {
			for i := fb.cnt; i > 0; i-- {
				f := &fb.fin[i-1]

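				// The frame must hold fn's single argument (a pointer or
				// an interface value, hence the two-word interface size)
				// plus f.nret bytes for fn's ignored return values.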
				framesz := unsafe.Sizeof((interface{})(nil)) + f.nret
				if framecap < framesz {
					// The frame does not contain pointers interesting for GC;
					// all not-yet-finalized objects are stored in finq.
					// If we did not allocate the frame as noscan (typ == nil),
					// the last finalized object would not be collected.
					frame = mallocgc(framesz, nil, true)
					framecap = framesz
				}

				if f.fint == nil {
					throw("missing type in runfinq")
				}
				// frame is effectively uninitialized
				// memory. That means we have to clear
				// it before writing to it to avoid
				// confusing the write barrier.
				*(*[2]uintptr)(frame) = [2]uintptr{}
				switch f.fint.kind & kindMask {
				case kindPtr:
					// direct use of pointer
					*(*unsafe.Pointer)(frame) = f.arg
				case kindInterface:
					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
					// set up with empty interface
					(*eface)(frame)._type = &f.ot.typ
					(*eface)(frame).data = f.arg
					if len(ityp.mhdr) != 0 {
						// convert to interface with methods
						// this conversion is guaranteed to succeed - we checked in SetFinalizer
						*(*iface)(frame) = assertE2I(ityp, *(*eface)(frame))
					}
				default:
					throw("bad kind in runfinq")
				}
				fingRunning = true
				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
				fingRunning = false

				// Drop finalizer queue heap references
				// before hiding them from markroot.
				// This also ensures these will be
				// clear if we reuse the finalizer.
				f.fn = nil
				f.arg = nil
				f.ot = nil
				atomic.Store(&fb.cnt, i-1)
			}
			next := fb.next
			lock(&finlock)
			fb.next = finc
			finc = fb
			unlock(&finlock)
			fb = next
		}
	}
}

// SetFinalizer sets the finalizer associated with obj to the provided
// finalizer function. When the garbage collector finds an unreachable block
// with an associated finalizer, it clears the association and runs
// finalizer(obj) in a separate goroutine. This makes obj reachable again,
// but now without an associated finalizer. Assuming that SetFinalizer
// is not called again, the next time the garbage collector sees
// that obj is unreachable, it will free obj.
//
// SetFinalizer(obj, nil) clears any finalizer associated with obj.
//
// The argument obj must be a pointer to an object allocated by calling
// new, by taking the address of a composite literal, or by taking the
// address of a local variable.
// The argument finalizer must be a function that takes a single argument
// to which obj's type can be assigned, and can have arbitrary ignored return
// values. If either of these is not true, SetFinalizer may abort the
// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
// for A runs; once A is freed, the finalizer for B can run.
// If a cyclic structure includes a block with a finalizer, that
// cycle is not guaranteed to be garbage collected and the finalizer
// is not guaranteed to run, because there is no ordering that
// respects the dependencies.
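//
// For example (an illustrative sketch, not taken from any package):
//
//	type B struct{ buf []byte }
//	type A struct{ b *B }
//	a := &A{b: &B{}}
//	runtime.SetFinalizer(a, func(*A) { println("finalizing A") })
//	runtime.SetFinalizer(a.b, func(*B) { println("finalizing B") })
//
// Once a becomes unreachable, only a's finalizer runs in that collection;
// a.b's finalizer can run in a later collection, after a has been freed.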
//
// The finalizer for obj is scheduled to run at some arbitrary time after
// obj becomes unreachable.
// There is no guarantee that finalizers will run before a program exits,
// so typically they are useful only for releasing non-memory resources
// associated with an object during a long-running program.
// For example, an os.File object could use a finalizer to close the
// associated operating system file descriptor when a program discards
// an os.File without calling Close, but it would be a mistake
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// bufio.Writer, because the buffer would not be flushed at program exit.
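//
// A minimal sketch of that os.File pattern (illustrative only; the wrapper
// type and its Close discipline are hypothetical, not part of any library):
//
//	type file struct{ fd int }
//	fd, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
//	// ... do something if err != nil ...
//	p := &file{fd}
//	runtime.SetFinalizer(p, func(p *file) { syscall.Close(p.fd) })
//	// An explicit Close method would call syscall.Close(p.fd) and then
//	// runtime.SetFinalizer(p, nil) so the finalizer does not run later.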
//
// It is not guaranteed that a finalizer will run if the size of *obj is
// zero bytes.
//
// It is not guaranteed that a finalizer will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// A finalizer may run as soon as an object becomes unreachable.
// In order to use finalizers correctly, the program must ensure that
// the object is reachable until it is no longer required.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. For other objects,
// pass the object to a call of the KeepAlive function to mark the
// last point in the function where the object must be reachable.
//
// For example, if p points to a struct that contains a file descriptor d,
// and p has a finalizer that closes that file descriptor, and if the last
// use of p in a function is a call to syscall.Write(p.d, buf, size), then
// p may be unreachable as soon as the program enters syscall.Write. The
// finalizer may run at that moment, closing p.d, causing syscall.Write
// to fail because it is writing to a closed file descriptor (or, worse,
// to an entirely different file descriptor opened by a different goroutine).
// To avoid this problem, call runtime.KeepAlive(p) after the call to
// syscall.Write.
//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
func SetFinalizer(obj interface{}, finalizer interface{}) {
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no finalizers run
		// (and we don't have the data structures to record them).
		return
	}
	e := efaceOf(&obj)
	etyp := e._type
	if etyp == nil {
		throw("runtime.SetFinalizer: first argument is nil")
	}
	if etyp.kind&kindMask != kindPtr {
		throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
	}
	ot := (*ptrtype)(unsafe.Pointer(etyp))
	if ot.elem == nil {
		throw("nil elem type!")
	}

	// find the containing object
	_, base, _ := findObject(e.data)

	if base == nil {
		// 0-length objects are okay.
		if e.data == unsafe.Pointer(&zerobase) {
			return
		}

		// Global initializers might be linker-allocated.
		//	var Foo = &Object{}
		//	func main() {
		//		runtime.SetFinalizer(Foo, nil)
		//	}
		// The relevant segments are: noptrdata, data, bss, noptrbss.
		// We cannot assume they are in any order or even contiguous,
		// due to external linking.
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			if datap.noptrdata <= uintptr(e.data) && uintptr(e.data) < datap.enoptrdata ||
				datap.data <= uintptr(e.data) && uintptr(e.data) < datap.edata ||
				datap.bss <= uintptr(e.data) && uintptr(e.data) < datap.ebss ||
				datap.noptrbss <= uintptr(e.data) && uintptr(e.data) < datap.enoptrbss {
				return
			}
		}
		throw("runtime.SetFinalizer: pointer not in allocated block")
	}

	if e.data != base {
		// As an implementation detail, we allow setting finalizers on an inner byte
		// of an object if it could have come from a tiny allocation (see mallocgc for details).
		if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
		}
	}

	f := efaceOf(&finalizer)
	ftyp := f._type
	if ftyp == nil {
		// switch to system stack and remove finalizer
		systemstack(func() {
			removefinalizer(e.data)
		})
		return
	}

	if ftyp.kind&kindMask != kindFunc {
		throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
	}
	ft := (*functype)(unsafe.Pointer(ftyp))
	if ft.dotdotdot() {
		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot")
	}
	if ft.inCount != 1 {
		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
	}
	fint := ft.in()[0]
	switch {
	case fint == etyp:
		// ok - same type
		goto okarg
	case fint.kind&kindMask == kindPtr:
		if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
			goto okarg
		}
	case fint.kind&kindMask == kindInterface:
		ityp := (*interfacetype)(unsafe.Pointer(fint))
		if len(ityp.mhdr) == 0 {
			// ok - satisfies empty interface
			goto okarg
		}
		if _, ok := assertE2I2(ityp, *efaceOf(&obj)); ok {
			goto okarg
		}
	}
	throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
okarg:
	// compute size needed for return parameters
	nret := uintptr(0)
	for _, t := range ft.out() {
		nret = round(nret, uintptr(t.align)) + uintptr(t.size)
	}
	nret = round(nret, sys.PtrSize)

	// make sure we have a finalizer goroutine
	createfing()

	systemstack(func() {
		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
			throw("runtime.SetFinalizer: finalizer already set")
		}
	})
}

// Look up pointer v in heap. Return the span containing the object,
// the start of the object, and the size of the object. If the object
// does not exist, return nil, nil, 0.
func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
	c := gomcache()
	c.local_nlookup++
	if sys.PtrSize == 4 && c.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(c)
		unlock(&mheap_.lock)
	}

	// find span
	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used
	if uintptr(v) < arena_start || uintptr(v) >= arena_used {
		return
	}
	p := uintptr(v) >> pageShift
	q := p - arena_start>>pageShift
	s = mheap_.spans[q]
	if s == nil {
		return
	}
	x = unsafe.Pointer(s.base())

	if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
		s = nil
		x = nil
		return
	}

	n = s.elemsize
	if s.sizeclass != 0 {
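		// For size-classed (small object) spans, round v down to the
		// start of the element that contains it; elements are spaced
		// elemsize bytes apart from the span base.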
		x = add(x, (uintptr(v)-uintptr(x))/n*n)
	}
	return
}

// Mark KeepAlive as noinline so that the current compiler will ensure
// that the argument is alive at the point of the function call.
// If it were inlined, it would disappear, and there would be nothing
// keeping the argument alive. Perhaps a future compiler will recognize
// runtime.KeepAlive specially and do something more efficient.
//go:noinline

// KeepAlive marks its argument as currently reachable.
// This ensures that the object is not freed, and its finalizer is not run,
// before the point in the program where KeepAlive is called.
//
// A very simplified example showing where KeepAlive is required:
// 	type File struct { d int }
// 	d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
// 	// ... do something if err != nil ...
// 	p := &File{d}
// 	runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
// 	var buf [10]byte
// 	n, err := syscall.Read(p.d, buf[:])
// 	// Ensure p is not finalized until Read returns.
// 	runtime.KeepAlive(p)
// 	// No more uses of p after this point.
//
// Without the KeepAlive call, the finalizer could run at the start of
// syscall.Read, closing the file descriptor before syscall.Read makes
// the actual system call.
func KeepAlive(interface{}) {}
    477