// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass
	nonempty  mSpanList // list of spans with a free object, i.e., a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully-allocated. Written atomically, read under STW.
	nmalloc uint64
}

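// A rough sketch, not code from this file, of how a span typically moves
// through an mcentral (the caller is an mcache refilling or flushing):
//
//	s := mheap_.central[spc].mcentral.cacheSpan() // take a span with free slots
//	// ... the mcache allocates objects from s ...
//	mheap_.central[spc].mcentral.uncacheSpan(s)   // hand the span back
//
// Swept spans with no allocated objects are returned to the heap by freeSpan.
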
// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
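	// A span's sweepgen encodes its sweep state relative to mheap_.sweepgen (sg):
	// sg-2 means the span needs sweeping, sg-1 means it is currently being
	// swept, and sg means it has already been swept this cycle.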
	sg := mheap_.sweepgen
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

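	// No nonempty span was ready. Scan the empty list: sweeping an unswept
	// span there may free objects. Unswept spans sit at the front of the
	// list, since swept spans are reinserted at the back.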
	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in the process of being swept
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// and c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
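	// cap is the total number of objects this span can hold; n is how many
	// of those are still free.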
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	s.incache = true
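	// s.allocCache holds the complement of a 64-object-aligned chunk of
	// s.allocBits, so a set bit marks a free slot and the next free index
	// can be found with a count of trailing zeros.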
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}

// Return span from an MCache.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
		// mCentral_CacheSpan conservatively counted
		// unallocated slots in heap_live. Undo this.
		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't.
		atomic.Xadd64(&c.nmalloc, -int64(n))
	}
	unlock(&c.lock)
}

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan returns true if s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
	if s.incache {
		throw("freeSpan given cached span")
	}
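	// The span's memory now contains stale data from freed objects, so it
	// must be zeroed before it can be handed out again.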
	s.needzero = 1

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above;
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, 0)
	return true
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])
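	// n is the number of objects of this size class that fit in one span.
	// For example, assuming 8KB pages, the 1024-byte class uses a single
	// page and yields 8 objects per span.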
	n := (npages << _PageShift) / size

	s := mheap_.alloc(npages, c.spanclass, false, true)
	if s == nil {
		return nil
	}

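	// limit marks the end of the last whole object in the span; the tail
	// beyond n objects is never handed out.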
	p := s.base()
	s.limit = p + size*n

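	// Initialize the heap bitmap for the span's memory so the GC has
	// pointer/scan metadata for objects allocated from it.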
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}
    243