// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan
//	   is now "idle", so it is returned to the mheap and no longer
//	   has a size class.
//	   This may coalesce it with adjacent idle mspans.
//
//	4. If an mspan remains idle for long enough, return its pages
//	   to the operating system.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// Free object slots in an mspan are zeroed only if mspan.needzero is
// false. If needzero is true, objects are zeroed as they are
// allocated. There are various benefits to delaying zeroing this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.
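//
// As a concrete illustration of step 1 of the allocation hierarchy
// (using the size-class tables generated elsewhere in this package;
// 112 reflects the tables in this snapshot), a 100-byte allocation
// is rounded up before the mcache is consulted:
//
//	sizeclass := size_to_class8[(100+smallSizeDiv-1)/smallSizeDiv]
//	size := uintptr(class_to_size[sizeclass]) // 112 bytes
//
// The same lookup appears in mallocgc below.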

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask
	// By construction, single page spans of the smallest object class
	// have the most objects per span.
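	// With 8192-byte pages and 8-byte objects, that is 1024 objects.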
	maxObjsPerSpan = pageSize / 8

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
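	// (On a 64-bit system ^uintptr(0)>>63 is 1, so the expression
	// below evaluates to 1<<1/2 = 1; on a 32-bit system the shift
	// yields 0 and it evaluates to 1<<0/2 = 0.)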
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
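	//
	// For example, evaluating the expression below: linux/amd64 gives
	// 4 - 8/4*0 - 1*0 = 4; windows/amd64 gives 4 - 8/4*1 - 0 = 2;
	// windows/386 gives 4 - 4/4*1 - 0 = 3; plan9 gives 4 - 0 - 1 = 3,
	// matching the table above.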
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// Number of bits in page-to-span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by the page table into the committed
	// memory of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address space.
	// The only exception is mips32, which only has access to the low 2GB of virtual memory.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
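	//
	// For example, evaluating the expression below: linux/amd64 gives
	// (1*0)*35 + (1*1*1)*39 + 0 + 0 = 39 bits; windows/amd64 gives 35;
	// darwin/arm64 gives 31; linux/386 gives 32; mips and mipsle give 31.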
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle))
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	_MaxArena32 = 1<<32 - 1

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns memory to the operating system unconditionally;
// it is only used if an out-of-memory error has been detected midway
// through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
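//
// A typical lifetime of a heap region, in terms of the lowercase Go
// implementations of these helpers as used in this file, is sketched
// below (a sketch, not a fixed protocol; exact use varies by path):
//
//	v := sysReserve(nil, size, &reserved)         // reserve address space
//	sysMap(v, size, reserved, &memstats.heap_sys) // commit it for use
//	// ... allocate from the region ...
//	sysUnused(v, size) // contents no longer needed
//	sysUsed(v, size)   // contents needed again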

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	testdefersizes()

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}

	var p, bitmapSize, spansSize, pSize, limit uintptr
	var reserved bool

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	limit = 0

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 2 bits per allocated word.
	if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (sys.PtrSize * 8 / 2)
		spansSize = arenaSize / _PageSize * sys.PtrSize
		spansSize = round(spansSize, _PageSize)
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle the entire 4GB address space (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere.

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		// In the worst case, fall back to a 0-sized initial arena,
		// in the hope that subsequent reservations will succeed.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
			0,
		}

		for _, arenaSize := range arenaSizes {
			bitmapSize = (_MaxArena32 + 1) / (sys.PtrSize * 8 / 2)
			spansSize = (_MaxArena32 + 1) / _PageSize * sys.PtrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * sys.PtrSize
			}
			spansSize = round(spansSize, _PageSize)

			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than the OS's definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	spansStart := p1
	mheap_.bitmap = p1 + spansSize + bitmapSize
	if sys.PtrSize == 4 {
		// Set arena_start such that we can accept memory
		// reservations located anywhere in the 4GB virtual space.
		mheap_.arena_start = 0
	} else {
		mheap_.arena_start = p1 + (spansSize + bitmapSize)
	}
	mheap_.arena_end = p + pSize
	mheap_.arena_used = p1 + (spansSize + bitmapSize)
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mheap_.init(spansStart, spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}
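
// After mallocinit on a 64-bit system, the reservation made above is
// laid out, in address order, roughly as follows (a sketch; the heap
// bitmap is indexed downward from arena_start, see mbitmap.go):
//
//	p1 ........ p1+spansSize ........ arena_start ............ arena_end
//	[  spans  ][       bitmap       ][           arena           ]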

// sysAlloc allocates the next n bytes from the heap arena. The
// returned pointer is always _PageSize aligned and between
// h.arena_start and h.arena_end. sysAlloc returns nil on failure.
// There is no corresponding free function.
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode; maybe we didn't use all of the
		// possible address space yet. Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				return nil
			}
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-p & (_PageSize - 1))
				h.mapBits(used)
				h.mapSpans(used)
				h.arena_used = used
				h.arena_reserved = reserved
			} else {
				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}

		if p&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start > _MaxArena32 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || p+p_size-h.arena_start > _MaxArena32 {
		top := ^uintptr(0)
		if top-h.arena_start-1 > _MaxArena32 {
			top = h.arena_start + _MaxArena32 + 1
		}
		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
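	// Round p up to a page boundary: -p & (_PageSize-1) is the
	// distance from p to the next multiple of _PageSize (zero if p
	// is already aligned).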
	p += -p & (_PageSize - 1)
	if p+n > h.arena_used {
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}
	}

	if p&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
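			// At a 64-object boundary the allocCache is exhausted
			// and must be refilled; take the slow path, which does
			// that, unless this was the very last object in the
			// span.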
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= (theBit + 1)
			s.freeindex = freeidx
			v := gclinkptr(result*s.elemsize + s.base())
			s.allocCount++
			return v
		}
	}
	return 0
}
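
// For example, if s.freeindex is 32 and the low bits of s.allocCache
// are ...11111000 (a 1 bit marks a free slot), then objects 32-34 are
// in use, Ctz64 returns 3, and nextFreeFast hands out the object at
// index 35, shifting allocCache right by 4 so the next search starts
// at index 36. (Illustrative values; allocCache maintenance is in
// mbitmap.go.)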

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span that has an available object and
// returns that object, along with a flag indicating whether this was a
// heavyweight allocation. If it was, the caller must determine whether a
// new GC cycle needs to be started or, if GC is active, whether this
// goroutine needs to assist the GC.
func (c *mcache) nextFree(sizeclass uint8) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[sizeclass]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		systemstack(func() {
			c.refill(int32(sizeclass))
		})
		shouldhelpgc = true
		s = c.alloc[sizeclass]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var x unsafe.Pointer
	noscan := typ == nil || typ.kind&kindNoPointers != 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which gives at
			// most 2x worst-case memory wastage (when all but one subobject
			// is unreachable). 8 bytes would result in no wastage at all,
			// but provides fewer opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in that case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
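			// For example, after a 5-byte allocation tinyoffset is 5;
			// a subsequent 8-byte request rounds off up to 8, and
			// 8+8 <= maxTinySize, so it is served at offset 8 of the
			// current tiny block.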
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into the existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span := c.alloc[tinySizeClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, _, shouldhelpgc = c.nextFree(tinySizeClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on the amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
			} else {
				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
			}
			size = uintptr(class_to_size[sizeclass])
			span := c.alloc[sizeclass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(sizeclass)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, needzero)
		})
		s.freeindex = 1
		s.allocCount = 1
		x = unsafe.Pointer(s.base())
		size = s.elemsize
	}

	var scanSize uintptr
	if noscan {
		heapBitsSetTypeNoScan(uintptr(x))
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.local_scan += scanSize
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)
	}

	return x
}

func largeAlloc(size uintptr, needzero bool) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}
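	// For example, with 8 kB pages a 33 kB request yields
	// npages = 33*1024>>_PageShift = 4 with a nonzero remainder,
	// so npages is bumped to 5 (40 kB).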

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npages pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, 0, true, needzero)
	if s == nil {
		throw("out of memory")
	}
	s.limit = s.base() + size
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}

// Implementation of the new builtin.
// The compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}
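
// For example, the statement
//
//	p := new(int64)
//
// compiles to a call equivalent to newobject(t), where t is int64's
// *_type descriptor, and returns zeroed, type-safe memory for one
// int64. (The descriptor is supplied by the compiler; the name t is
// illustrative.)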

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(typ.size*uintptr(n), typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the cumulative distribution
// function for an exponential.
func nextSample() int32 {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	period := MemProfileRate

	// Make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
	switch {
	case period > 0x7000000:
		period = 0x7000000
	case period == 0:
		return 0
	}

	// Let m be the sample rate. The probability density function
	// is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// log_e(q) = -mx
	// -log_e(q)/m = x
	// x = -log_e(q) * period
	// x = log_2(q) * (-log_e(2)) * period    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}
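
// The inversion above can be sanity-checked outside the runtime with
// ordinary Go, using math.Log in place of fastlog2 (a sketch; the
// function below is illustrative and not part of the runtime):
//
//	func sampleDistance(period float64) float64 {
//		q := (rand.Float64()*((1<<26)-1) + 1) / (1 << 26) // uniform in (0, 1]
//		return -math.Log(q) * period                      // inverse CDF of an exponential
//	}
//
// Averaging many draws of sampleDistance converges to period, the
// documented mean of the sampling distribution.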

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(int(fastrand()) % (2 * rate))
	}
	return 0
}

type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}
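
// For example, a small table that must live for the life of the
// process could be obtained as (illustrative only):
//
//	tab := (*[512]byte)(persistentalloc(512, 8, &memstats.other_sys))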

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}
    974