// mem_linux.go — Go runtime memory management for Linux.
      1 // Copyright 2010 The Go Authors.  All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package runtime
      6 
      7 import "unsafe"
      8 
const (
	// _PAGE_SIZE is the physical page size; addrspace_free probes the
	// address space in units of this size.
	_PAGE_SIZE = _PhysPageSize
	// _EACCES is the Linux errno for "permission denied".
	_EACCES = 13
)
     13 
// addrspace_vec is the result buffer passed to mincore by addrspace_free.
//
// NOTE: vec must be just 1 byte long here.
// Mincore returns ENOMEM if any of the pages are unmapped,
// but we want to know that all of the pages are unmapped.
// To make these the same, we can only ask about one page
// at a time. See golang.org/issue/7476.
var addrspace_vec [1]byte
     20 
     21 func addrspace_free(v unsafe.Pointer, n uintptr) bool {
     22 	var chunk uintptr
     23 	for off := uintptr(0); off < n; off += chunk {
     24 		chunk = _PAGE_SIZE * uintptr(len(addrspace_vec))
     25 		if chunk > (n - off) {
     26 			chunk = n - off
     27 		}
     28 		errval := mincore(unsafe.Pointer(uintptr(v)+off), chunk, &addrspace_vec[0])
     29 		// ENOMEM means unmapped, which is what we want.
     30 		// Anything else we assume means the pages are mapped.
     31 		if errval != -_ENOMEM {
     32 			return false
     33 		}
     34 	}
     35 	return true
     36 }
     37 
     38 func mmap_fixed(v unsafe.Pointer, n uintptr, prot, flags, fd int32, offset uint32) unsafe.Pointer {
     39 	p := mmap(v, n, prot, flags, fd, offset)
     40 	// On some systems, mmap ignores v without
     41 	// MAP_FIXED, so retry if the address space is free.
     42 	if p != v && addrspace_free(v, n) {
     43 		if uintptr(p) > 4096 {
     44 			munmap(p, n)
     45 		}
     46 		p = mmap(v, n, prot, flags|_MAP_FIXED, fd, offset)
     47 	}
     48 	return p
     49 }
     50 
     51 // Don't split the stack as this method may be invoked without a valid G, which
     52 // prevents us from allocating more stack.
     53 //go:nosplit
     54 func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
     55 	p := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
     56 	if uintptr(p) < 4096 {
     57 		if uintptr(p) == _EACCES {
     58 			print("runtime: mmap: access denied\n")
     59 			exit(2)
     60 		}
     61 		if uintptr(p) == _EAGAIN {
     62 			print("runtime: mmap: too much locked memory (check 'ulimit -l').\n")
     63 			exit(2)
     64 		}
     65 		return nil
     66 	}
     67 	mSysStatInc(sysStat, n)
     68 	return p
     69 }
     70 
     71 func sysUnused(v unsafe.Pointer, n uintptr) {
     72 	var s uintptr = hugePageSize // division by constant 0 is a compile-time error :(
     73 	if s != 0 && (uintptr(v)%s != 0 || n%s != 0) {
     74 		// See issue 8832
     75 		// Linux kernel bug: https://bugzilla.kernel.org/show_bug.cgi?id=93111
     76 		// Mark the region as NOHUGEPAGE so the kernel's khugepaged
     77 		// doesn't undo our DONTNEED request.  khugepaged likes to migrate
     78 		// regions which are only partially mapped to huge pages, including
     79 		// regions with some DONTNEED marks.  That needlessly allocates physical
     80 		// memory for our DONTNEED regions.
     81 		madvise(v, n, _MADV_NOHUGEPAGE)
     82 	}
     83 	madvise(v, n, _MADV_DONTNEED)
     84 }
     85 
     86 func sysUsed(v unsafe.Pointer, n uintptr) {
     87 	if hugePageSize != 0 {
     88 		// Undo the NOHUGEPAGE marks from sysUnused.  There is no alignment check
     89 		// around this call as spans may have been merged in the interim.
     90 		// Note that this might enable huge pages for regions which were
     91 		// previously disabled.  Unfortunately there is no easy way to detect
     92 		// what the previous state was, and in any case we probably want huge
     93 		// pages to back our heap if the kernel can arrange that.
     94 		madvise(v, n, _MADV_HUGEPAGE)
     95 	}
     96 }
     97 
// sysFree returns the n bytes at v to the OS and removes them from the
// sysStat accounting.
//
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
	// Update the stat before unmapping the pages.
	mSysStatDec(sysStat, n)
	munmap(v, n)
}
    105 
// sysFault remaps the n bytes at v with PROT_NONE so that any future
// access to the region faults.
func sysFault(v unsafe.Pointer, n uintptr) {
	mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
}
    109 
    110 func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
    111 	// On 64-bit, people with ulimit -v set complain if we reserve too
    112 	// much address space.  Instead, assume that the reservation is okay
    113 	// if we can reserve at least 64K and check the assumption in SysMap.
    114 	// Only user-mode Linux (UML) rejects these requests.
    115 	if ptrSize == 8 && uint64(n) > 1<<32 {
    116 		p := mmap_fixed(v, 64<<10, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
    117 		if p != v {
    118 			if uintptr(p) >= 4096 {
    119 				munmap(p, 64<<10)
    120 			}
    121 			return nil
    122 		}
    123 		munmap(p, 64<<10)
    124 		*reserved = false
    125 		return v
    126 	}
    127 
    128 	p := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
    129 	if uintptr(p) < 4096 {
    130 		return nil
    131 	}
    132 	*reserved = true
    133 	return p
    134 }
    135 
    136 func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
    137 	mSysStatInc(sysStat, n)
    138 
    139 	// On 64-bit, we don't actually have v reserved, so tread carefully.
    140 	if !reserved {
    141 		p := mmap_fixed(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
    142 		if uintptr(p) == _ENOMEM {
    143 			throw("runtime: out of memory")
    144 		}
    145 		if p != v {
    146 			print("runtime: address space conflict: map(", v, ") = ", p, "\n")
    147 			throw("runtime: address space conflict")
    148 		}
    149 		return
    150 	}
    151 
    152 	p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
    153 	if uintptr(p) == _ENOMEM {
    154 		throw("runtime: out of memory")
    155 	}
    156 	if p != v {
    157 		throw("runtime: cannot map pages in arena address space")
    158 	}
    159 }
    160