      1 // Copyright 2014 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 // Memory allocator.
      6 //
      7 // This was originally based on tcmalloc, but has diverged quite a bit.
      8 // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
      9 
     10 // The main allocator works in runs of pages.
     11 // Small allocation sizes (up to and including 32 kB) are
     12 // rounded to one of about 70 size classes, each of which
     13 // has its own free set of objects of exactly that size.
     14 // Any free page of memory can be split into a set of objects
     15 // of one size class, which are then managed using a free bitmap.
     16 //
     17 // The allocator's data structures are:
     18 //
     19 //	fixalloc: a free-list allocator for fixed-size off-heap objects,
     20 //		used to manage storage used by the allocator.
     21 //	mheap: the malloc heap, managed at page (8192-byte) granularity.
     22 //	mspan: a run of pages managed by the mheap.
     23 //	mcentral: collects all spans of a given size class.
     24 //	mcache: a per-P cache of mspans with free space.
     25 //	mstats: allocation statistics.
     26 //
     27 // Allocating a small object proceeds up a hierarchy of caches:
     28 //
     29 //	1. Round the size up to one of the small size classes
     30 //	   and look in the corresponding mspan in this P's mcache.
     31 //	   Scan the mspan's free bitmap to find a free slot.
     32 //	   If there is a free slot, allocate it.
     33 //	   This can all be done without acquiring a lock.
     34 //
     35 //	2. If the mspan has no free slots, obtain a new mspan
     36 //	   from the mcentral's list of mspans of the required size
     37 //	   class that have free space.
     38 //	   Obtaining a whole span amortizes the cost of locking
     39 //	   the mcentral.
     40 //
     41 //	3. If the mcentral's mspan list is empty, obtain a run
     42 //	   of pages from the mheap to use for the mspan.
     43 //
     44 //	4. If the mheap is empty or has no page runs large enough,
     45 //	   allocate a new group of pages (at least 1MB) from the
     46 //	   operating system. Allocating a large run of pages
     47 //	   amortizes the cost of talking to the operating system.
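        //
        //	For example, a 40-byte noscan allocation is rounded up to the
        //	48-byte size class and is normally served straight from the
        //	current P's mcache; the mcentral and mheap are involved only
        //	when the cached span, and then the central list, run out of
        //	free slots.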
     48 //
     49 // Sweeping an mspan and freeing objects on it proceeds up a similar
     50 // hierarchy:
     51 //
     52 //	1. If the mspan is being swept in response to allocation, it
     53 //	   is returned to the mcache to satisfy the allocation.
     54 //
     55 //	2. Otherwise, if the mspan still has allocated objects in it,
     56 //	   it is placed on the mcentral free list for the mspan's size
     57 //	   class.
     58 //
     59 //	3. Otherwise, if all objects in the mspan are free, the mspan
     60 //	   is now "idle", so it is returned to the mheap and no longer
     61 //	   has a size class.
     62 //	   This may coalesce it with adjacent idle mspans.
     63 //
     64 //	4. If an mspan remains idle for long enough, return its pages
     65 //	   to the operating system.
     66 //
     67 // Allocating and freeing a large object uses the mheap
     68 // directly, bypassing the mcache and mcentral.
     69 //
     70 // Free object slots in an mspan are zeroed only if mspan.needzero is
     71 // false. If needzero is true, objects are zeroed as they are
     72 // allocated. There are various benefits to delaying zeroing this way:
     73 //
     74 //	1. Stack frame allocation can avoid zeroing altogether.
     75 //
     76 //	2. It exhibits better temporal locality, since the program is
     77 //	   probably about to write to the memory.
     78 //
     79 //	3. We don't zero pages that never get reused.
     80 
     81 package runtime
     82 
     83 import (
     84 	"runtime/internal/sys"
     85 	"unsafe"
     86 )
     87 
     88 const (
     89 	debugMalloc = false
     90 
     91 	maxTinySize   = _TinySize
     92 	tinySizeClass = _TinySizeClass
     93 	maxSmallSize  = _MaxSmallSize
     94 
     95 	pageShift = _PageShift
     96 	pageSize  = _PageSize
     97 	pageMask  = _PageMask
     98 	// By construction, single page spans of the smallest object class
     99 	// have the most objects per span.
    100 	maxObjsPerSpan = pageSize / 8
    101 
    102 	mSpanInUse = _MSpanInUse
    103 
    104 	concurrentSweep = _ConcurrentSweep
    105 
    106 	_PageSize = 1 << _PageShift
    107 	_PageMask = _PageSize - 1
    108 
    109 	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
    110 	_64bit = 1 << (^uintptr(0) >> 63) / 2
    111 
    112 	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
    113 	_TinySize      = 16
    114 	_TinySizeClass = int8(2)
    115 
    116 	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
    117 	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
    118 	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth
    119 
    120 	// Per-P, per order stack segment cache size.
    121 	_StackCacheSize = 32 * 1024
    122 
    123 	// Number of orders that get caching. Order 0 is FixedStack
    124 	// and each successive order is twice as large.
    125 	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
    126 	// will be allocated directly.
    127 	// Since FixedStack is different on different systems, we
    128 	// must vary NumStackOrders to keep the same maximum cached size.
    129 	//   OS               | FixedStack | NumStackOrders
    130 	//   -----------------+------------+---------------
    131 	//   linux/darwin/bsd | 2KB        | 4
    132 	//   windows/32       | 4KB        | 3
    133 	//   windows/64       | 8KB        | 2
    134 	//   plan9            | 4KB        | 3
    135 	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
    136 
    137 	// Number of bits in page to span calculations (4k pages).
    138 	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
    139 	// Windows counts memory used by page table into committed memory
    140 	// of the process, so we can't reserve too much memory.
    141 	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
    142 	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
    143 	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
    144 	// The only exception is mips32 which only has access to low 2GB of virtual memory.
    145 	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
    146 	// but as most devices have less than 4GB of physical memory anyway, we
    147 	// try to be conservative here, and only ask for a 2GB heap.
    148 	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle))
    149 	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift
    150 
    151 	// _MaxMem is the maximum heap arena size minus 1.
    152 	//
    153 	// On 32-bit, this is also the maximum heap pointer value,
    154 	// since the arena starts at address 0.
    155 	_MaxMem = 1<<_MHeapMap_TotalBits - 1
    156 
    157 	// Max number of threads to run garbage collection.
    158 	// 2, 3, and 4 are all plausible maximums depending
    159 	// on the hardware details of the machine. The garbage
    160 	// collector scales well to 32 cpus.
    161 	_MaxGcproc = 32
    162 
    163 	// minLegalPointer is the smallest possible legal pointer.
    164 	// This is the smallest possible architectural page size,
    165 	// since we assume that the first page is never mapped.
    166 	//
    167 	// This should agree with minZeroPage in the compiler.
    168 	minLegalPointer uintptr = 4096
    169 )
    170 
    171 // physPageSize is the size in bytes of the OS's physical pages.
    172 // Mapping and unmapping operations must be done at multiples of
    173 // physPageSize.
    174 //
    175 // This must be set by the OS init code (typically in osinit) before
    176 // mallocinit.
    177 var physPageSize uintptr
    178 
    179 // OS-defined helpers:
    180 //
    181 // sysAlloc obtains a large chunk of zeroed memory from the
    182 // operating system, typically on the order of a hundred kilobytes
    183 // or a megabyte.
    184 // NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
    185 // may use larger alignment, so the caller must be careful to realign the
    186 // memory obtained by sysAlloc.
    187 //
    188 // SysUnused notifies the operating system that the contents
    189 // of the memory region are no longer needed and can be reused
    190 // for other purposes.
    191 // SysUsed notifies the operating system that the contents
    192 // of the memory region are needed again.
    193 //
    194 // SysFree returns memory to the operating system unconditionally;
    195 // this is only used if an out-of-memory error has been detected
    196 // midway through an allocation. It is okay if SysFree is a no-op.
    197 //
    198 // SysReserve reserves address space without allocating memory.
    199 // If the pointer passed to it is non-nil, the caller wants the
    200 // reservation there, but SysReserve can still choose another
    201 // location if that one is unavailable. On some systems and in some
    202 // cases SysReserve will simply check that the address space is
    203 // available and not actually reserve it. If SysReserve returns
    204 // non-nil, it sets *reserved to true if the address space is
    205 // reserved, false if it has merely been checked.
    206 // NOTE: SysReserve returns OS-aligned memory, but the heap allocator
    207 // may use larger alignment, so the caller must be careful to realign the
    208 // memory obtained by sysAlloc.
    209 //
    210 // SysMap maps previously reserved address space for use.
    211 // The reserved argument is true if the address space was really
    212 // reserved, not merely checked.
    213 //
    214 // SysFault marks an (already sysAlloc'd) region to fault
    215 // if accessed. Used only for debugging the runtime.
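        //
        // As called from this file, these helpers have roughly the following
        // signatures (their per-OS implementations live in the mem_*.go files;
        // this list is a reading aid, not a definitive API):
        //
        //	func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
        //	func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer
        //	func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64)
        //	func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64)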
    216 
    217 func mallocinit() {
    218 	if class_to_size[_TinySizeClass] != _TinySize {
    219 		throw("bad TinySizeClass")
    220 	}
    221 
    222 	testdefersizes()
    223 
    224 	// Copy class sizes out for statistics table.
    225 	for i := range class_to_size {
    226 		memstats.by_size[i].size = uint32(class_to_size[i])
    227 	}
    228 
    229 	// Check physPageSize.
    230 	if physPageSize == 0 {
    231 		// The OS init code failed to fetch the physical page size.
    232 		throw("failed to get system page size")
    233 	}
    234 	if physPageSize < minPhysPageSize {
    235 		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
    236 		throw("bad system page size")
    237 	}
    238 	if physPageSize&(physPageSize-1) != 0 {
    239 		print("system page size (", physPageSize, ") must be a power of 2\n")
    240 		throw("bad system page size")
    241 	}
    242 
    243 	// The auxiliary regions start at p and are laid out in the
    244 	// following order: spans, bitmap, arena.
    245 	var p, pSize uintptr
    246 	var reserved bool
    247 
    248 	// The spans array holds one *mspan per _PageSize of arena.
    249 	var spansSize uintptr = (_MaxMem + 1) / _PageSize * sys.PtrSize
    250 	spansSize = round(spansSize, _PageSize)
    251 	// The bitmap holds 2 bits per word of arena.
    252 	var bitmapSize uintptr = (_MaxMem + 1) / (sys.PtrSize * 8 / 2)
    253 	bitmapSize = round(bitmapSize, _PageSize)
    254 
    255 	// Set up the allocation arena, a contiguous area of memory where
    256 	// allocated data will be found.
    257 	if sys.PtrSize == 8 {
    258 		// On a 64-bit machine, allocate from a single contiguous reservation.
    259 		// 512 GB (MaxMem) should be big enough for now.
    260 		//
    261 		// The code will work with the reservation at any address, but ask
    262 		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
    263 		// Allocating a 512 GB region takes away 39 bits, and the amd64
    264 		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
    265 		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
    266 		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
    267 		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
    268 		// UTF-8 sequences, and they are otherwise as far away from
    269 		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
    270 		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
    271 		// on OS X during thread allocations.  0x00c0 causes conflicts with
    272 		// AddressSanitizer which reserves all memory up to 0x0100.
    273 		// These choices are both for debuggability and to reduce the
    274 		// odds of a conservative garbage collector (as is still used in gccgo)
    275 		// not collecting memory because some non-pointer block of memory
    276 		// had a bit pattern that matched a memory address.
    277 		//
    278 		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
    279 		// but it hardly matters: e0 00 is not valid UTF-8 either.
    280 		//
    281 		// If this fails, we fall back to the 32-bit memory mechanism.
    282 		//
    283 		// However, on arm64, we ignore all this advice above and slam the
    284 		// allocation at 0x40 << 32 because when using 4k pages with 3-level
    285 		// translation buffers, the user address space is limited to 39 bits.
    286 		// On darwin/arm64, the address space is even smaller.
    287 		arenaSize := round(_MaxMem, _PageSize)
    288 		pSize = bitmapSize + spansSize + arenaSize + _PageSize
    289 		for i := 0; i <= 0x7f; i++ {
    290 			switch {
    291 			case GOARCH == "arm64" && GOOS == "darwin":
    292 				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
    293 			case GOARCH == "arm64":
    294 				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
    295 			default:
    296 				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
    297 			}
    298 			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
    299 			if p != 0 {
    300 				break
    301 			}
    302 		}
    303 	}
    304 
    305 	if p == 0 {
    306 		// On a 32-bit machine, we can't typically get away
    307 		// with a giant virtual address space reservation.
    308 		// Instead we map the memory information bitmap
    309 		// immediately after the data segment, large enough
    310 		// to handle the entire 4GB address space (256 MB),
    311 		// along with a reservation for an initial arena.
    312 		// When that gets used up, we'll start asking the kernel
    313 		// for any memory anywhere.
    314 
    315 		// We want to start the arena low, but if we're linked
    316 		// against C code, it's possible global constructors
    317 		// have called malloc and adjusted the process' brk.
    318 		// Query the brk so we can avoid trying to map the
    319 		// arena over it (which will cause the kernel to put
    320 		// the arena somewhere else, likely at a high
    321 		// address).
    322 		procBrk := sbrk0()
    323 
    324 		// If we fail to allocate, try again with a smaller arena.
    325 		// This is necessary on Android L where we share a process
    326 		// with ART, which reserves virtual memory aggressively.
    327 		// In the worst case, fall back to a 0-sized initial arena,
    328 		// in the hope that subsequent reservations will succeed.
    329 		arenaSizes := []uintptr{
    330 			512 << 20,
    331 			256 << 20,
    332 			128 << 20,
    333 			0,
    334 		}
    335 
    336 		for _, arenaSize := range arenaSizes {
    337 			// SysReserve treats the address we ask for, end, as a hint,
    338 			// not as an absolute requirement. If we ask for the end
    339 			// of the data segment but the operating system requires
    340 			// a little more space before we can start allocating, it will
    341 			// give out a slightly higher pointer. Except QEMU, which
    342 			// is buggy, as usual: it won't adjust the pointer upward.
    343 			// So adjust it upward a little bit ourselves: 1/4 MB to get
    344 			// away from the running binary image and then round up
    345 			// to a MB boundary.
    346 			p = round(firstmoduledata.end+(1<<18), 1<<20)
    347 			pSize = bitmapSize + spansSize + arenaSize + _PageSize
    348 			if p <= procBrk && procBrk < p+pSize {
    349 				// Move the start above the brk,
    350 				// leaving some room for future brk
    351 				// expansion.
    352 				p = round(procBrk+(1<<20), 1<<20)
    353 			}
    354 			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
    355 			if p != 0 {
    356 				break
    357 			}
    358 		}
    359 		if p == 0 {
    360 			throw("runtime: cannot reserve arena virtual address space")
    361 		}
    362 	}
    363 
    364 	// PageSize can be larger than OS definition of page size,
    365 	// so SysReserve can give us a PageSize-unaligned pointer.
    366 	// To overcome this we ask for PageSize more and round up the pointer.
    367 	p1 := round(p, _PageSize)
    368 	pSize -= p1 - p
    369 
    370 	spansStart := p1
    371 	p1 += spansSize
    372 	mheap_.bitmap = p1 + bitmapSize
    373 	p1 += bitmapSize
    374 	if sys.PtrSize == 4 {
    375 		// Set arena_start such that we can accept memory
    376 		// reservations located anywhere in the 4GB virtual space.
    377 		mheap_.arena_start = 0
    378 	} else {
    379 		mheap_.arena_start = p1
    380 	}
    381 	mheap_.arena_end = p + pSize
    382 	mheap_.arena_used = p1
    383 	mheap_.arena_alloc = p1
    384 	mheap_.arena_reserved = reserved
    385 
    386 	if mheap_.arena_start&(_PageSize-1) != 0 {
    387 		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
    388 		throw("misrounded allocation in mallocinit")
    389 	}
    390 
    391 	// Initialize the rest of the allocator.
    392 	mheap_.init(spansStart, spansSize)
    393 	_g_ := getg()
    394 	_g_.m.mcache = allocmcache()
    395 }
    396 
    397 // sysAlloc allocates the next n bytes from the heap arena. The
    398 // returned pointer is always _PageSize aligned and between
    399 // h.arena_start and h.arena_end. sysAlloc returns nil on failure.
    400 // There is no corresponding free function.
    401 func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
    402 	// strandLimit is the maximum number of bytes to strand from
    403 	// the current arena block. If we would need to strand more
    404 	// than this, we fall back to sysAlloc'ing just enough for
    405 	// this allocation.
    406 	const strandLimit = 16 << 20
    407 
    408 	if n > h.arena_end-h.arena_alloc {
    409 		// If we haven't grown the arena to _MaxMem yet, try
    410 		// to reserve some more address space.
    411 		p_size := round(n+_PageSize, 256<<20)
    412 		new_end := h.arena_end + p_size // Careful: can overflow
    413 		if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxMem {
    414 			// TODO: It would be bad if part of the arena
    415 			// is reserved and part is not.
    416 			var reserved bool
    417 			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
    418 			if p == 0 {
    419 				// TODO: Try smaller reservation
    420 				// growths in case we're in a crowded
    421 				// 32-bit address space.
    422 				goto reservationFailed
    423 			}
    424 			// p can be just about anywhere in the address
    425 			// space, including before arena_end.
    426 			if p == h.arena_end {
    427 				// The new block is contiguous with
    428 				// the current block. Extend the
    429 				// current arena block.
    430 				h.arena_end = new_end
    431 				h.arena_reserved = reserved
    432 			} else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxMem && h.arena_end-h.arena_alloc < strandLimit {
    433 				// We were able to reserve more memory
    434 				// within the arena space, but it's
    435 				// not contiguous with our previous
    436 				// reservation. It could be before or
    437 				// after our current arena_used.
    438 				//
    439 				// Keep everything page-aligned.
    440 				// Our pages are bigger than hardware pages.
    441 				h.arena_end = p + p_size
    442 				p = round(p, _PageSize)
    443 				h.arena_alloc = p
    444 				h.arena_reserved = reserved
    445 			} else {
    446 				// We got a mapping, but either
    447 				//
    448 				// 1) It's not in the arena, so we
    449 				// can't use it. (This should never
    450 				// happen on 32-bit.)
    451 				//
    452 				// 2) We would need to discard too
    453 				// much of our current arena block to
    454 				// use it.
    455 				//
    456 				// We haven't added this allocation to
    457 				// the stats, so subtract it from a
    458 				// fake stat (but avoid underflow).
    459 				//
    460 				// We'll fall back to a small sysAlloc.
    461 				stat := uint64(p_size)
    462 				sysFree(unsafe.Pointer(p), p_size, &stat)
    463 			}
    464 		}
    465 	}
    466 
    467 	if n <= h.arena_end-h.arena_alloc {
    468 		// Keep taking from our reservation.
    469 		p := h.arena_alloc
    470 		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
    471 		h.arena_alloc += n
    472 		if h.arena_alloc > h.arena_used {
    473 			h.setArenaUsed(h.arena_alloc, true)
    474 		}
    475 
    476 		if p&(_PageSize-1) != 0 {
    477 			throw("misrounded allocation in MHeap_SysAlloc")
    478 		}
    479 		return unsafe.Pointer(p)
    480 	}
    481 
    482 reservationFailed:
    483 	// If using 64-bit, our reservation is all we have.
    484 	if sys.PtrSize != 4 {
    485 		return nil
    486 	}
    487 
    488 	// On 32-bit, once the reservation is gone we can
    489 	// try to get memory at a location chosen by the OS.
    490 	p_size := round(n, _PageSize) + _PageSize
    491 	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
    492 	if p == 0 {
    493 		return nil
    494 	}
    495 
    496 	if p < h.arena_start || p+p_size-h.arena_start > _MaxMem {
    497 		// This shouldn't be possible because _MaxMem is the
    498 		// whole address space on 32-bit.
    499 		top := uint64(h.arena_start) + _MaxMem
    500 		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
    501 		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
    502 		return nil
    503 	}
    504 
    505 	p += -p & (_PageSize - 1)
    506 	if p+n > h.arena_used {
    507 		h.setArenaUsed(p+n, true)
    508 	}
    509 
    510 	if p&(_PageSize-1) != 0 {
    511 		throw("misrounded allocation in MHeap_SysAlloc")
    512 	}
    513 	return unsafe.Pointer(p)
    514 }
    515 
    516 // base address for all 0-byte allocations
    517 var zerobase uintptr
    518 
    519 // nextFreeFast returns the next free object if one is quickly available.
    520 // Otherwise it returns 0.
    521 func nextFreeFast(s *mspan) gclinkptr {
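        	// s.allocCache caches a 64-bit window of the span's allocation
        	// bitmap starting at s.freeindex, stored complemented so that a
        	// 1 bit means "free slot" and Ctz64 (count trailing zeros) finds
        	// the first free slot directly.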
    522 	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
    523 	if theBit < 64 {
    524 		result := s.freeindex + uintptr(theBit)
    525 		if result < s.nelems {
    526 			freeidx := result + 1
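        			// When freeidx reaches a multiple of 64 the cached bitmap
        			// word is exhausted; fall back to the slow path, which
        			// refills allocCache.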
    527 			if freeidx%64 == 0 && freeidx != s.nelems {
    528 				return 0
    529 			}
    530 			s.allocCache >>= uint(theBit + 1)
    531 			s.freeindex = freeidx
    532 			s.allocCount++
    533 			return gclinkptr(result*s.elemsize + s.base())
    534 		}
    535 	}
    536 	return 0
    537 }
    538 
    539 // nextFree returns the next free object from the cached span if one is available.
    540 // Otherwise it refills the cache with a span with an available object and
    541 // returns that object along with a flag indicating that this was a
    542 // heavyweight allocation. If it was a heavyweight allocation, the caller
    543 // must determine whether a new GC cycle needs to be started or, if GC is
    544 // active, whether this goroutine needs to assist the GC.
    545 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
    546 	s = c.alloc[spc]
    547 	shouldhelpgc = false
    548 	freeIndex := s.nextFreeIndex()
    549 	if freeIndex == s.nelems {
    550 		// The span is full.
    551 		if uintptr(s.allocCount) != s.nelems {
    552 			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
    553 			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
    554 		}
    555 		systemstack(func() {
    556 			c.refill(spc)
    557 		})
    558 		shouldhelpgc = true
    559 		s = c.alloc[spc]
    560 
    561 		freeIndex = s.nextFreeIndex()
    562 	}
    563 
    564 	if freeIndex >= s.nelems {
    565 		throw("freeIndex is not valid")
    566 	}
    567 
    568 	v = gclinkptr(freeIndex*s.elemsize + s.base())
    569 	s.allocCount++
    570 	if uintptr(s.allocCount) > s.nelems {
    571 		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
    572 		throw("s.allocCount > s.nelems")
    573 	}
    574 	return
    575 }
    576 
    577 // Allocate an object of size bytes.
    578 // Small objects are allocated from the per-P cache's free lists.
    579 // Large objects (> 32 kB) are allocated straight from the heap.
    580 func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
    581 	if gcphase == _GCmarktermination {
    582 		throw("mallocgc called with gcphase == _GCmarktermination")
    583 	}
    584 
    585 	if size == 0 {
    586 		return unsafe.Pointer(&zerobase)
    587 	}
    588 
    589 	if debug.sbrk != 0 {
    590 		align := uintptr(16)
    591 		if typ != nil {
    592 			align = uintptr(typ.align)
    593 		}
    594 		return persistentalloc(size, align, &memstats.other_sys)
    595 	}
    596 
    597 	// assistG is the G to charge for this allocation, or nil if
    598 	// GC is not currently active.
    599 	var assistG *g
    600 	if gcBlackenEnabled != 0 {
    601 		// Charge the current user G for this allocation.
    602 		assistG = getg()
    603 		if assistG.m.curg != nil {
    604 			assistG = assistG.m.curg
    605 		}
    606 		// Charge the allocation against the G. We'll account
    607 		// for internal fragmentation at the end of mallocgc.
    608 		assistG.gcAssistBytes -= int64(size)
    609 
    610 		if assistG.gcAssistBytes < 0 {
    611 			// This G is in debt. Assist the GC to correct
    612 			// this before allocating. This must happen
    613 			// before disabling preemption.
    614 			gcAssistAlloc(assistG)
    615 		}
    616 	}
    617 
    618 	// Set mp.mallocing to keep from being preempted by GC.
    619 	mp := acquirem()
    620 	if mp.mallocing != 0 {
    621 		throw("malloc deadlock")
    622 	}
    623 	if mp.gsignal == getg() {
    624 		throw("malloc during signal")
    625 	}
    626 	mp.mallocing = 1
    627 
    628 	shouldhelpgc := false
    629 	dataSize := size
    630 	c := gomcache()
    631 	var x unsafe.Pointer
    632 	noscan := typ == nil || typ.kind&kindNoPointers != 0
    633 	if size <= maxSmallSize {
    634 		if noscan && size < maxTinySize {
    635 			// Tiny allocator.
    636 			//
    637 			// The tiny allocator combines several tiny allocation requests
    638 			// into a single memory block. The resulting memory block
    639 			// is freed when all subobjects are unreachable. The subobjects
    640 			// must be noscan (have no pointers); this ensures that
    641 			// the amount of potentially wasted memory is bounded.
    642 			//
    643 			// The size of the memory block used for combining (maxTinySize) is tunable.
    644 			// The current setting is 16 bytes, which gives at most 2x worst-case memory
    645 			// wastage (when all but one of the subobjects are unreachable).
    646 			// 8 bytes would result in no wastage at all, but provides fewer
    647 			// opportunities for combining.
    648 			// 32 bytes provides more opportunities for combining,
    649 			// but can lead to 4x worst-case wastage.
    650 			// The best-case saving is 8x regardless of block size.
    651 			//
    652 			// Objects obtained from the tiny allocator must not be freed explicitly.
    653 			// So when an object will be freed explicitly, we ensure that
    654 			// its size >= maxTinySize.
    655 			//
    656 			// SetFinalizer has a special case for objects potentially coming
    657 			// from the tiny allocator; in such a case it allows setting finalizers
    658 			// for an inner byte of a memory block.
    659 			//
    660 			// The main targets of the tiny allocator are small strings and
    661 			// standalone escaping variables. On a json benchmark
    662 			// the allocator reduces the number of allocations by ~12% and
    663 			// reduces heap size by ~20%.
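        			//
        			// For example, three 5-byte noscan allocations can share one
        			// 16-byte tiny block (at offsets 0, 5 and 10); the block is
        			// freed only after all three become unreachable.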
    664 			off := c.tinyoffset
    665 			// Align tiny pointer for required (conservative) alignment.
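        			// For instance, a 12-byte request gets 4-byte alignment and
        			// a 10-byte request gets 2-byte alignment.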
    666 			if size&7 == 0 {
    667 				off = round(off, 8)
    668 			} else if size&3 == 0 {
    669 				off = round(off, 4)
    670 			} else if size&1 == 0 {
    671 				off = round(off, 2)
    672 			}
    673 			if off+size <= maxTinySize && c.tiny != 0 {
    674 				// The object fits into existing tiny block.
    675 				x = unsafe.Pointer(c.tiny + off)
    676 				c.tinyoffset = off + size
    677 				c.local_tinyallocs++
    678 				mp.mallocing = 0
    679 				releasem(mp)
    680 				return x
    681 			}
    682 			// Allocate a new maxTinySize block.
    683 			span := c.alloc[tinySpanClass]
    684 			v := nextFreeFast(span)
    685 			if v == 0 {
    686 				v, _, shouldhelpgc = c.nextFree(tinySpanClass)
    687 			}
    688 			x = unsafe.Pointer(v)
    689 			(*[2]uint64)(x)[0] = 0
    690 			(*[2]uint64)(x)[1] = 0
    691 			// See if we need to replace the existing tiny block with the new one
    692 			// based on amount of remaining free space.
    693 			if size < c.tinyoffset || c.tiny == 0 {
    694 				c.tiny = uintptr(x)
    695 				c.tinyoffset = size
    696 			}
    697 			size = maxTinySize
    698 		} else {
    699 			var sizeclass uint8
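        			// The size class is found by table lookup: size_to_class8 is
        			// indexed in smallSizeDiv-byte steps for sizes up to
        			// smallSizeMax-8, size_to_class128 in largeSizeDiv-byte steps
        			// for the rest of the small-object range.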
    700 			if size <= smallSizeMax-8 {
    701 				sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
    702 			} else {
    703 				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
    704 			}
    705 			size = uintptr(class_to_size[sizeclass])
    706 			spc := makeSpanClass(sizeclass, noscan)
    707 			span := c.alloc[spc]
    708 			v := nextFreeFast(span)
    709 			if v == 0 {
    710 				v, span, shouldhelpgc = c.nextFree(spc)
    711 			}
    712 			x = unsafe.Pointer(v)
    713 			if needzero && span.needzero != 0 {
    714 				memclrNoHeapPointers(unsafe.Pointer(v), size)
    715 			}
    716 		}
    717 	} else {
    718 		var s *mspan
    719 		shouldhelpgc = true
    720 		systemstack(func() {
    721 			s = largeAlloc(size, needzero, noscan)
    722 		})
    723 		s.freeindex = 1
    724 		s.allocCount = 1
    725 		x = unsafe.Pointer(s.base())
    726 		size = s.elemsize
    727 	}
    728 
    729 	var scanSize uintptr
    730 	if !noscan {
    731 		// If allocating a defer+arg block, now that we've picked a malloc size
    732 		// large enough to hold everything, cut the "asked for" size down to
    733 		// just the defer header, so that the GC bitmap will record the arg block
    734 		// as containing nothing at all (as if it were unused space at the end of
    735 		// a malloc block caused by size rounding).
    736 		// The defer arg areas are scanned as part of scanstack.
    737 		if typ == deferType {
    738 			dataSize = unsafe.Sizeof(_defer{})
    739 		}
    740 		heapBitsSetType(uintptr(x), size, dataSize, typ)
    741 		if dataSize > typ.size {
    742 			// Array allocation. If there are any
    743 			// pointers, GC has to scan to the last
    744 			// element.
    745 			if typ.ptrdata != 0 {
    746 				scanSize = dataSize - typ.size + typ.ptrdata
    747 			}
    748 		} else {
    749 			scanSize = typ.ptrdata
    750 		}
    751 		c.local_scan += scanSize
    752 	}
    753 
    754 	// Ensure that the stores above that initialize x to
    755 	// type-safe memory and set the heap bits occur before
    756 	// the caller can make x observable to the garbage
    757 	// collector. Otherwise, on weakly ordered machines,
    758 	// the garbage collector could follow a pointer to x,
    759 	// but see uninitialized memory or stale heap bits.
    760 	publicationBarrier()
    761 
    762 	// Allocate black during GC.
    763 	// All slots hold nil so no scanning is needed.
    764 	// This may be racing with GC so do it atomically if there can be
    765 	// a race marking the bit.
    766 	if gcphase != _GCoff {
    767 		gcmarknewobject(uintptr(x), size, scanSize)
    768 	}
    769 
    770 	if raceenabled {
    771 		racemalloc(x, size)
    772 	}
    773 
    774 	if msanenabled {
    775 		msanmalloc(x, size)
    776 	}
    777 
    778 	mp.mallocing = 0
    779 	releasem(mp)
    780 
    781 	if debug.allocfreetrace != 0 {
    782 		tracealloc(x, size, typ)
    783 	}
    784 
    785 	if rate := MemProfileRate; rate > 0 {
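        		// c.next_sample counts down the bytes remaining until the next
        		// sampled allocation; allocations that stay below both rate and
        		// the remaining count only decrement it.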
    786 		if size < uintptr(rate) && int32(size) < c.next_sample {
    787 			c.next_sample -= int32(size)
    788 		} else {
    789 			mp := acquirem()
    790 			profilealloc(mp, x, size)
    791 			releasem(mp)
    792 		}
    793 	}
    794 
    795 	if assistG != nil {
    796 		// Account for internal fragmentation in the assist
    797 		// debt now that we know it.
    798 		assistG.gcAssistBytes -= int64(size - dataSize)
    799 	}
    800 
    801 	if shouldhelpgc {
    802 		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
    803 			gcStart(gcBackgroundMode, t)
    804 		}
    805 	}
    806 
    807 	return x
    808 }
    809 
    810 func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
    811 	// print("largeAlloc size=", size, "\n")
    812 
    813 	if size+_PageSize < size {
    814 		throw("out of memory")
    815 	}
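        	// Round the requested size up to a whole number of pages.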
    816 	npages := size >> _PageShift
    817 	if size&_PageMask != 0 {
    818 		npages++
    819 	}
    820 
    821 	// Deduct credit for this span allocation and sweep if
    822 	// necessary. mHeap_Alloc will also sweep npages, so this only
    823 	// pays the debt down to npage pages.
    824 	deductSweepCredit(npages*_PageSize, npages)
    825 
    826 	s := mheap_.alloc(npages, makeSpanClass(0, noscan), true, needzero)
    827 	if s == nil {
    828 		throw("out of memory")
    829 	}
    830 	s.limit = s.base() + size
    831 	heapBitsForSpan(s.base()).initSpan(s)
    832 	return s
    833 }
    834 
    835 // newobject implements the new builtin.
    836 // The compiler (both frontend and SSA backend) knows the signature
    837 // of this function.
    838 func newobject(typ *_type) unsafe.Pointer {
    839 	return mallocgc(typ.size, typ, true)
    840 }
    841 
    842 //go:linkname reflect_unsafe_New reflect.unsafe_New
    843 func reflect_unsafe_New(typ *_type) unsafe.Pointer {
    844 	return newobject(typ)
    845 }
    846 
    847 // newarray allocates an array of n elements of type typ.
    848 func newarray(typ *_type, n int) unsafe.Pointer {
    849 	if n == 1 {
    850 		return mallocgc(typ.size, typ, true)
    851 	}
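        	// maxSliceCap bounds n so that n*typ.size cannot overflow or
        	// exceed the maximum allocation size.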
    852 	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
    853 		panic(plainError("runtime: allocation size out of range"))
    854 	}
    855 	return mallocgc(typ.size*uintptr(n), typ, true)
    856 }
    857 
    858 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
    859 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
    860 	return newarray(typ, n)
    861 }
    862 
    863 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
    864 	mp.mcache.next_sample = nextSample()
    865 	mProf_Malloc(x, size)
    866 }
    867 
    868 // nextSample returns the next sampling point for heap profiling. The goal is
    869 // to sample allocations on average every MemProfileRate bytes, but with a
    870 // completely random distribution over the allocation timeline; this
    871 // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
    872 // processes, the distance between two samples follows the exponential
    873 // distribution (exp(MemProfileRate)), so the best return value is a random
    874 // number taken from an exponential distribution whose mean is MemProfileRate.
    875 func nextSample() int32 {
    876 	if GOOS == "plan9" {
    877 		// Plan 9 doesn't support floating point in note handler.
    878 		if g := getg(); g == g.m.gsignal {
    879 			return nextSampleNoFP()
    880 		}
    881 	}
    882 
    883 	return fastexprand(MemProfileRate)
    884 }
    885 
    886 // fastexprand returns a random number from an exponential distribution with
    887 // the specified mean.
    888 func fastexprand(mean int) int32 {
    889 	// Avoid overflow. Maximum possible step is
    890 	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
    891 	switch {
    892 	case mean > 0x7000000:
    893 		mean = 0x7000000
    894 	case mean == 0:
    895 		return 0
    896 	}
    897 
    898 	// Take a random sample of the exponential distribution exp(-x/mean).
    899 	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
    900 	// p = 1 - exp(-x/mean), so
    901 	// q = 1 - p == exp(-x/mean)
    902 	// log_e(q) = -x/mean
    903 	// -log_e(q) = x/mean
    904 	// x = -log_e(q) * mean
    905 	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
    906 	const randomBitCount = 26
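        	// q is uniform in [1, 1<<randomBitCount], so q/(1<<randomBitCount)
        	// is approximately uniform in (0, 1] and qlog, its base-2 log, is <= 0.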
    907 	q := fastrand()%(1<<randomBitCount) + 1
    908 	qlog := fastlog2(float64(q)) - randomBitCount
    909 	if qlog > 0 {
    910 		qlog = 0
    911 	}
    912 	const minusLog2 = -0.6931471805599453 // -ln(2)
    913 	return int32(qlog*(minusLog2*float64(mean))) + 1
    914 }
    915 
    916 // nextSampleNoFP is similar to nextSample, but uses older,
    917 // simpler code to avoid floating point.
    918 func nextSampleNoFP() int32 {
    919 	// Set first allocation sample size.
    920 	rate := MemProfileRate
    921 	if rate > 0x3fffffff { // make 2*rate not overflow
    922 		rate = 0x3fffffff
    923 	}
    924 	if rate != 0 {
    925 		return int32(fastrand() % uint32(2*rate))
    926 	}
    927 	return 0
    928 }
    929 
    930 type persistentAlloc struct {
    931 	base *notInHeap
    932 	off  uintptr
    933 }
    934 
    935 var globalAlloc struct {
    936 	mutex
    937 	persistentAlloc
    938 }
    939 
    940 // Wrapper around sysAlloc that can allocate small chunks.
    941 // There is no associated free operation.
    942 // Intended for things like function/type/debug-related persistent data.
    943 // If align is 0, uses default align (currently 8).
    944 // The returned memory will be zeroed.
    945 //
    946 // Consider marking persistentalloc'd types go:notinheap.
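        //
        // For example (illustrative only; the size and stat argument are arbitrary):
        //
        //	buf := (*[1024]byte)(persistentalloc(1024, 0, &memstats.other_sys))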
    947 func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
    948 	var p *notInHeap
    949 	systemstack(func() {
    950 		p = persistentalloc1(size, align, sysStat)
    951 	})
    952 	return unsafe.Pointer(p)
    953 }
    954 
    955 // Must run on system stack because stack growth can (re)invoke it.
    956 // See issue 9174.
    957 //go:systemstack
    958 func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
    959 	const (
    960 		chunk    = 256 << 10
    961 		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
    962 	)
    963 
    964 	if size == 0 {
    965 		throw("persistentalloc: size == 0")
    966 	}
    967 	if align != 0 {
    968 		if align&(align-1) != 0 {
    969 			throw("persistentalloc: align is not a power of 2")
    970 		}
    971 		if align > _PageSize {
    972 			throw("persistentalloc: align is too large")
    973 		}
    974 	} else {
    975 		align = 8
    976 	}
    977 
    978 	if size >= maxBlock {
    979 		return (*notInHeap)(sysAlloc(size, sysStat))
    980 	}
    981 
    982 	mp := acquirem()
    983 	var persistent *persistentAlloc
    984 	if mp != nil && mp.p != 0 {
    985 		persistent = &mp.p.ptr().palloc
    986 	} else {
    987 		lock(&globalAlloc.mutex)
    988 		persistent = &globalAlloc.persistentAlloc
    989 	}
    990 	persistent.off = round(persistent.off, align)
    991 	if persistent.off+size > chunk || persistent.base == nil {
    992 		persistent.base = (*notInHeap)(sysAlloc(chunk, &memstats.other_sys))
    993 		if persistent.base == nil {
    994 			if persistent == &globalAlloc.persistentAlloc {
    995 				unlock(&globalAlloc.mutex)
    996 			}
    997 			throw("runtime: cannot allocate memory")
    998 		}
    999 		persistent.off = 0
   1000 	}
   1001 	p := persistent.base.add(persistent.off)
   1002 	persistent.off += size
   1003 	releasem(mp)
   1004 	if persistent == &globalAlloc.persistentAlloc {
   1005 		unlock(&globalAlloc.mutex)
   1006 	}
   1007 
   1008 	if sysStat != &memstats.other_sys {
   1009 		mSysStatInc(sysStat, size)
   1010 		mSysStatDec(&memstats.other_sys, size)
   1011 	}
   1012 	return p
   1013 }
   1014 
   1015 // notInHeap is off-heap memory allocated by a lower-level allocator
   1016 // like sysAlloc or persistentAlloc.
   1017 //
   1018 // In general, it's better to use real types marked as go:notinheap,
   1019 // but this serves as a generic type for situations where that isn't
   1020 // possible (like in the allocators).
   1021 //
   1022 // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
   1023 //
   1024 //go:notinheap
   1025 type notInHeap struct{}
   1026 
   1027 func (p *notInHeap) add(bytes uintptr) *notInHeap {
   1028 	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
   1029 }
   1030