Home | History | Annotate | Download | only in runtime
      1 // Copyright 2009 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package runtime
      6 
      7 import "unsafe"
      8 
      9 // Per-thread (in Go, per-P) cache for small objects.
     10 // No locking needed because it is per-thread (per-P).
     11 //
     12 // mcaches are allocated from non-GC'd memory, so any heap pointers
     13 // must be specially handled.
     14 //
     15 //go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample int32   // trigger heap sample after allocating this many bytes
	local_scan  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	tiny             uintptr // base address of the current tiny block (heap pointer; see above)
	tinyoffset       uintptr // offset of the next free byte within the tiny block
	local_tinyallocs uintptr // number of tiny allocs not counted in other stats

	// The rest is not accessed on every malloc.
	alloc [_NumSizeClasses]*mspan // spans to allocate from; &emptymspan when no span is cached

	stackcache [_NumStackOrders]stackfreelist // free stack lists, one per stack size order

	// Local allocator stats, flushed during GC.
	local_nlookup    uintptr                  // number of pointer lookups
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
}
     46 
     47 // A gclink is a node in a linked list of blocks, like mlink,
     48 // but it is opaque to the garbage collector.
     49 // The GC does not trace the pointers during collection,
     50 // and the compiler does not emit write barriers for assignments
     51 // of gclinkptr values. Code should store references to gclinks
     52 // as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr // next node in the list; held as gclinkptr so the GC does not trace it
}
     56 
// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
//
// Because it is declared as a uintptr, the GC does not trace
// values of this type and the compiler emits no write barriers
// for assignments of them (see the gclink comment above).
type gclinkptr uintptr
     60 
     61 // ptr returns the *gclink form of p.
     62 // The result should be used for accessing fields, not stored
     63 // in other data structures.
func (p gclinkptr) ptr() *gclink {
	// Single-expression uintptr -> unsafe.Pointer -> *gclink conversion.
	// Kept as one expression deliberately: splitting the conversion
	// across statements is exactly the pattern vet's unsafeptr check
	// rejects, and p is intentionally not a GC-visible pointer.
	return (*gclink)(unsafe.Pointer(p))
}
     67 
// A stackfreelist holds a linked list of free stacks and the total
// number of bytes in the list.
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}
     72 
// dummy MSpan that contains no free objects.
// It serves as the sentinel value for mcache.alloc entries:
// allocmcache and releaseAll install &emptymspan, and refill tests
// against it, so nil checks are never needed on the alloc array.
var emptymspan mspan
     75 
     76 func allocmcache() *mcache {
     77 	lock(&mheap_.lock)
     78 	c := (*mcache)(mheap_.cachealloc.alloc())
     79 	unlock(&mheap_.lock)
     80 	for i := 0; i < _NumSizeClasses; i++ {
     81 		c.alloc[i] = &emptymspan
     82 	}
     83 	c.next_sample = nextSample()
     84 	return c
     85 }
     86 
// freemcache returns all of c's cached spans and stacks to their
// owners, flushes its stats, and frees the mcache memory itself
// back to mheap_.cachealloc. Runs on the system stack.
func freemcache(c *mcache) {
	systemstack(func() {
		// Return cached spans and stack segments before freeing c;
		// after cachealloc.free the memory may be reused.
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		// Stats flush and the free itself are both guarded by mheap_.lock.
		lock(&mheap_.lock)
		purgecachedstats(c)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}
    103 
    104 // Gets a span that has a free object in it and assigns it
    105 // to be the cached span for the given sizeclass. Returns this span.
    106 func (c *mcache) refill(sizeclass int32) *mspan {
    107 	_g_ := getg()
    108 
    109 	_g_.m.locks++
    110 	// Return the current cached span to the central lists.
    111 	s := c.alloc[sizeclass]
    112 
    113 	if uintptr(s.allocCount) != s.nelems {
    114 		throw("refill of span with free space remaining")
    115 	}
    116 
    117 	if s != &emptymspan {
    118 		s.incache = false
    119 	}
    120 
    121 	// Get a new cached span from the central lists.
    122 	s = mheap_.central[sizeclass].mcentral.cacheSpan()
    123 	if s == nil {
    124 		throw("out of memory")
    125 	}
    126 
    127 	if uintptr(s.allocCount) == s.nelems {
    128 		throw("span has no free space")
    129 	}
    130 
    131 	c.alloc[sizeclass] = s
    132 	_g_.m.locks--
    133 	return s
    134 }
    135 
    136 func (c *mcache) releaseAll() {
    137 	for i := 0; i < _NumSizeClasses; i++ {
    138 		s := c.alloc[i]
    139 		if s != &emptymspan {
    140 			mheap_.central[i].mcentral.uncacheSpan(s)
    141 			c.alloc[i] = &emptymspan
    142 		}
    143 	}
    144 	// Clear tinyalloc pool.
    145 	c.tiny = 0
    146 	c.tinyoffset = 0
    147 }
    148