Home | History | Annotate | Download | only in runtime
      1 // Copyright 2009 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package runtime
      6 
      7 import "unsafe"
      8 
// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample      int32   // trigger heap sample after allocating this many bytes
	local_cachealloc uintptr // bytes allocated from cache since last lock of heap
	local_scan       uintptr // bytes of scannable heap allocated
	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.
	tiny             unsafe.Pointer // current tiny block, if any (see malloc.go)
	tinyoffset       uintptr        // offset into tiny block — presumably next free byte; see malloc.go
	local_tinyallocs uintptr        // number of tiny allocs not counted in other stats

	// The rest is not accessed on every malloc.
	alloc [_NumSizeClasses]*mspan // spans to allocate from, indexed by size class; &emptymspan when none is cached

	stackcache [_NumStackOrders]stackfreelist // free stack segments, one list per stack size order

	// Local allocator stats, flushed during GC.
	local_nlookup    uintptr                  // number of pointer lookups
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
}
     34 
// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr // next block in the list, or 0 at the end
}
     44 
// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
// Because it is declared as a uintptr rather than a pointer type,
// the GC does not follow it and no write barriers apply.
type gclinkptr uintptr
     48 
     49 // ptr returns the *gclink form of p.
     50 // The result should be used for accessing fields, not stored
     51 // in other data structures.
     52 func (p gclinkptr) ptr() *gclink {
     53 	return (*gclink)(unsafe.Pointer(p))
     54 }
     55 
// stackfreelist is one bucket of the per-P stack cache: a list of
// free stack segments of a single size order plus a byte count used
// to decide when to flush the bucket.
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}
     60 
// dummy MSpan that contains no free objects.
// Used as the placeholder value in mcache.alloc so that the malloc
// fast path never has to nil-check the cached span.
var emptymspan mspan
     63 
     64 func allocmcache() *mcache {
     65 	lock(&mheap_.lock)
     66 	c := (*mcache)(fixAlloc_Alloc(&mheap_.cachealloc))
     67 	unlock(&mheap_.lock)
     68 	memclr(unsafe.Pointer(c), unsafe.Sizeof(*c))
     69 	for i := 0; i < _NumSizeClasses; i++ {
     70 		c.alloc[i] = &emptymspan
     71 	}
     72 
     73 	// Set first allocation sample size.
     74 	rate := MemProfileRate
     75 	if rate > 0x3fffffff { // make 2*rate not overflow
     76 		rate = 0x3fffffff
     77 	}
     78 	if rate != 0 {
     79 		c.next_sample = int32(int(fastrand1()) % (2 * rate))
     80 	}
     81 
     82 	return c
     83 }
     84 
// freemcache releases c: its cached spans go back to the central
// lists, its stack cache is cleared, its cached stats are flushed
// into the global counters, and the mcache storage itself is
// returned to mheap_.cachealloc. Runs on the system stack because
// it manipulates heap-global state under mheap_.lock.
func freemcache(c *mcache) {
	systemstack(func() {
		mCache_ReleaseAll(c)
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		// Stats must be purged and the storage freed under the heap lock,
		// matching the lock held when the mcache was allocated.
		lock(&mheap_.lock)
		purgecachedstats(c)
		fixAlloc_Free(&mheap_.cachealloc, unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}
    101 
// Gets a span that has a free object in it and assigns it
// to be the cached span for the given sizeclass.  Returns this span.
// Must only be called when the current cached span for sizeclass is
// exhausted (its freelist is empty); throws otherwise.
func mCache_Refill(c *mcache, sizeclass int32) *mspan {
	_g_ := getg()

	// Bump m.locks while the cache entry is in flux — NOTE(review):
	// this appears to keep the M from being rescheduled mid-refill;
	// confirm against the scheduler's treatment of m.locks.
	_g_.m.locks++
	// Return the current cached span to the central lists.
	s := c.alloc[sizeclass]
	if s.freelist.ptr() != nil {
		// Caller contract violated: refill is only legal on an empty span.
		throw("refill on a nonempty span")
	}
	if s != &emptymspan {
		// The placeholder span is shared and never marked in-cache.
		s.incache = false
	}

	// Get a new cached span from the central lists.
	s = mCentral_CacheSpan(&mheap_.central[sizeclass].mcentral)
	if s == nil {
		throw("out of memory")
	}
	if s.freelist.ptr() == nil {
		// Print ref count vs. capacity to aid debugging before dying.
		println(s.ref, (s.npages<<_PageShift)/s.elemsize)
		throw("empty span")
	}
	c.alloc[sizeclass] = s
	_g_.m.locks--
	return s
}
    130 
    131 func mCache_ReleaseAll(c *mcache) {
    132 	for i := 0; i < _NumSizeClasses; i++ {
    133 		s := c.alloc[i]
    134 		if s != &emptymspan {
    135 			mCentral_UncacheSpan(&mheap_.central[i].mcentral, s)
    136 			c.alloc[i] = &emptymspan
    137 		}
    138 	}
    139 }
    140