// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	sizeclass int32
	nonempty  mSpanList // list of spans with a free object, i.e. a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
}

// Initialize a single central free list.
func (c *mcentral) init(sizeclass int32) {
	c.sizeclass = sizeclass
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.sizeclass]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	sg := mheap_.sweepgen
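	// A span's sweepgen tracks its sweep state relative to the heap's
	// sweepgen sg: sg-2 means the span needs sweeping, sg-1 means it is
	// currently being swept, and sg means it is swept and ready to use.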
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper; skip it
			continue
		}
		// we have a nonempty span that does not require sweeping; allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

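	// Nothing usable in the nonempty list; try sweeping spans in the
	// empty list in the hope that some of their objects were freed.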
	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
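			// If sweeping freed at least one object, nextFreeIndex
			// returns an index before nelems and we can use this span.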
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweeping; it is already in
			// the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper; skip it
			continue
		}
		// an already-swept empty span; all subsequent spans must also be
		// either swept or in the process of being swept
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
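	// cap is the total number of objects the span can hold;
	// n is how many of them are still unallocated.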
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	usedBytes := uintptr(s.allocCount) * s.elemsize
	if usedBytes > 0 {
		reimburseSweepCredit(usedBytes)
	}
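	// Conservatively count all of the span's free slots as live in
	// heap_live; uncacheSpan undoes the count for slots that go unused.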
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	s.incache = true
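	// allocCache holds a 64-object window of the span's allocation
	// bitmap. Round s.freeindex down to a 64-object boundary and find
	// the byte in allocBits where that window begins.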
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}

// Return a span from an MCache back to the central free list.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

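	// cap is the span's total object capacity; n is the number of
	// objects still free in s.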
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
		// cacheSpan conservatively counted unallocated slots in
		// heap_live. Undo this.
		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
	}
	unlock(&c.lock)
}

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan returns true if s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
	if s.incache {
		throw("freeSpan given cached span")
	}
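	// Freed objects in s may hold stale data, so the span's memory
	// must be zeroed before it is reused.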
	s.needzero = 1

	if preserve {
		// preserve is set only when called from cacheSpan above,
		// so the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}
	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// Delay updating sweepgen until here. Updating it is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked-list operations above (actually, it just needs to come
	// after acquiring c's lock above).
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

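	// s has no allocated objects left; return it to the heap.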
	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, 0)
	return true
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size

	s := mheap_.alloc(npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

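	// n*size may be less than npages<<_PageShift; s.limit marks the
	// end of the last whole object, and any tail beyond it is unused.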
	p := s.base()
	s.limit = p + size*n

	heapBitsForSpan(s.base()).initSpan(s)
	return s
}
    223