// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

// Central list of free objects of a given size.
type mcentral struct {
	lock      mutex
	sizeclass int32
	nonempty  mspan // list of spans with a free object
	empty     mspan // list of spans with no free objects (or cached in an mcache)
}

// Initialize a single central free list.
func mCentral_Init(c *mcentral, sizeclass int32) {
	c.sizeclass = sizeclass
	mSpanList_Init(&c.nonempty)
	mSpanList_Init(&c.empty)
}

// Allocate a span to use in an MCache.
func mCentral_CacheSpan(c *mcentral) *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	deductSweepCredit(uintptr(class_to_size[c.sizeclass]), 0)

	lock(&c.lock)
	sg := mheap_.sweepgen
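	// A span's sweepgen encodes its sweep state relative to the heap's:
	// sg-2 means the span still needs sweeping, sg-1 means it is
	// currently being swept, and sg means it is swept and ready to use.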
retry:
	var s *mspan
	for s = c.nonempty.next; s != &c.nonempty; s = s.next {
		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
			mSpanList_Remove(s)
			mSpanList_InsertBack(&c.empty, s)
			unlock(&c.lock)
			mSpan_Sweep(s, true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		mSpanList_Remove(s)
		mSpanList_InsertBack(&c.empty, s)
		unlock(&c.lock)
		goto havespan
	}

	for s = c.empty.next; s != &c.empty; s = s.next {
		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			mSpanList_Remove(s)
			// swept spans are at the end of the list
			mSpanList_InsertBack(&c.empty, s)
			unlock(&c.lock)
			mSpan_Sweep(s, true)
			if s.freelist.ptr() != nil {
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = mCentral_Grow(c)
	if s == nil {
		return nil
	}
	lock(&c.lock)
	mSpanList_InsertBack(&c.empty, s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
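	// Compute how many objects in s are still free: the span's total
	// object capacity minus the number already allocated (s.ref).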
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n == 0 {
		throw("empty span")
	}
	if s.freelist.ptr() == nil {
		throw("freelist empty")
	}
	s.incache = true
	return s
}

// Return span from an MCache.
func mCentral_UncacheSpan(c *mcentral, s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.ref == 0 {
		throw("uncaching full span")
	}

	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
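	// If the span still has free objects, move it back to the nonempty
	// list so it can be handed out again.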
	if n > 0 {
		mSpanList_Remove(s)
		mSpanList_Insert(&c.nonempty, s)
	}
	unlock(&c.lock)
}

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to the heap.  Sets sweepgen to
// the latest generation.
// If preserve=true, don't return the span to the heap nor relink it in
// MCentral lists; the caller takes care of it.
func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
	if s.incache {
		throw("freespan into cached span")
	}

	// Add the objects back to s's free list.
	wasempty := s.freelist.ptr() == nil
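	// Remember whether the free list was empty before this free;
	// if it was, the span must move to the nonempty list below.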
	end.ptr().next = s.freelist
	s.freelist = start
	s.ref -= uint16(n)

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above,
		// the span must be in the empty list.
		if s.next == nil {
			throw("can't preserve unlinked span")
		}
		atomicstore(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		mSpanList_Remove(s)
		mSpanList_Insert(&c.nonempty, s)
	}

	// delay updating sweepgen until here.  This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomicstore(&s.sweepgen, mheap_.sweepgen)

	if s.ref != 0 {
		unlock(&c.lock)
		return false
	}

	// s is completely freed, return it to the heap.
	mSpanList_Remove(s)
	s.needzero = 1
	s.freelist = 0
	unlock(&c.lock)
	heapBitsForSpan(s.base()).initSpan(s.layout())
	mHeap_Free(&mheap_, s, 0)
	return true
}

// Fetch a new span from the heap and carve it into objects for the free list.
func mCentral_Grow(c *mcentral) *mspan {
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size

	s := mHeap_Alloc(&mheap_, npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

	p := uintptr(s.start << _PageShift)
	s.limit = p + size*n
	head := gclinkptr(p)
	tail := gclinkptr(p)
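	// Thread the n objects through the span, linking each one to the
	// next to form the free list.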
	// i==0 iteration already done
	for i := uintptr(1); i < n; i++ {
		p += size
		tail.ptr().next = gclinkptr(p)
		tail = gclinkptr(p)
	}
	if s.freelist.ptr() != nil {
		throw("freelist not empty")
	}
	tail.ptr().next = 0
	s.freelist = head
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}
    213