// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

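// mOS holds FreeBSD-specific state attached to each m. It is empty here:
// the futex-based synchronization below needs no extra per-thread OS state.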
type mOS struct{}

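// The following declarations have no Go bodies; they are implemented in the
// runtime's per-architecture FreeBSD assembly and wrap the corresponding
// system calls.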
//go:noescape
func thr_new(param *thrparam, size int32)

//go:noescape
func sigaltstack(new, old *stackt)

//go:noescape
func sigaction(sig uint32, new, old *sigactiont)

//go:noescape
func sigprocmask(how int32, new, old *sigset)

//go:noescape
func setitimer(mode int32, new, old *itimerval)

//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

//go:noescape
func getrlimit(kind int32, limit unsafe.Pointer) int32
func raise(sig uint32)
func raiseproc(sig uint32)

//go:noescape
func sys_umtx_op(addr *uint32, mode int32, val uint32, uaddr1 uintptr, ut *umtx_time) int32

func osyield()

// From FreeBSD's <sys/sysctl.h>
const (
	_CTL_HW      = 6
	_HW_PAGESIZE = 7
)

var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}

// Undocumented numbers from FreeBSD's lib/libc/gen/sysctlnametomib.c.
const (
	_CTL_QUERY     = 0
	_CTL_QUERY_MIB = 3
)

// sysctlnametomib fills mib with the dynamically assigned sysctl entries
// for name and returns the number of mib slots filled, or 0 on error.
func sysctlnametomib(name []byte, mib *[_CTL_MAXNAME]uint32) uint32 {
	oid := [2]uint32{_CTL_QUERY, _CTL_QUERY_MIB}
	miblen := uintptr(_CTL_MAXNAME)
	if sysctl(&oid[0], 2, (*byte)(unsafe.Pointer(mib)), &miblen, (*byte)(unsafe.Pointer(&name[0])), (uintptr)(len(name))) < 0 {
		return 0
	}
	miblen /= unsafe.Sizeof(uint32(0))
	if miblen <= 0 {
		return 0
	}
	return uint32(miblen)
}

const (
	_CPU_CURRENT_PID = -1 // Current process ID.
)

//go:noescape
func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32

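// getncpu returns the number of CPUs this process is allowed to run on,
// as reported by cpuset_getaffinity; it returns 1 on any error.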
//go:systemstack
func getncpu() int32 {
	// Use a large buffer for the CPU mask. We're on the system
	// stack, so this is fine, and we can't allocate memory for a
	// dynamically-sized buffer at this point.
	const maxCPUs = 64 * 1024
	var mask [maxCPUs / 8]byte
	var mib [_CTL_MAXNAME]uint32

	// According to FreeBSD's /usr/src/sys/kern/kern_cpuset.c,
	// cpuset_getaffinity returns ERANGE when the provided buffer size
	// exceeds the limit in the kernel, so query kern.smp.maxcpus to
	// calculate the maximum buffer size.
	// See https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=200802

	// The kern.smp.maxcpus variable was introduced on Dec 23 2003
	// (revision 123766) with a dynamically assigned sysctl entry.
	miblen := sysctlnametomib([]byte("kern.smp.maxcpus"), &mib)
	if miblen == 0 {
		return 1
	}

	// Query kern.smp.maxcpus.
	dstsize := uintptr(4)
	maxcpus := uint32(0)
	if sysctl(&mib[0], miblen, (*byte)(unsafe.Pointer(&maxcpus)), &dstsize, nil, 0) != 0 {
		return 1
	}

	maskSize := int(maxcpus+7) / 8
	if maskSize < sys.PtrSize {
		maskSize = sys.PtrSize
	}
	if maskSize > len(mask) {
		maskSize = len(mask)
	}

	if cpuset_getaffinity(_CPU_LEVEL_WHICH, _CPU_WHICH_PID, _CPU_CURRENT_PID,
		maskSize, (*byte)(unsafe.Pointer(&mask[0]))) != 0 {
		return 1
	}
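	// Count the set bits in the affinity mask; each set bit is a CPU
	// the process may run on.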
	n := int32(0)
	for _, v := range mask[:maskSize] {
		for v != 0 {
			n += int32(v & 1)
			v >>= 1
		}
	}
	if n == 0 {
		return 1
	}
	return n
}

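// getPageSize returns the hardware page size as reported by the
// hw.pagesize sysctl, or 0 if the query fails.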
func getPageSize() uintptr {
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 {
		return uintptr(out)
	}
	return 0
}

// FreeBSD's umtx_op syscall is effectively the same as Linux's futex, and
// thus the code is largely similar. See Linux implementation
// and lock_futex.go for comments.

//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
	systemstack(func() {
		futexsleep1(addr, val, ns)
	})
}

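// futexsleep1 atomically checks that *addr still equals val and, if so,
// sleeps for up to ns nanoseconds (forever if ns < 0) using the
// _UMTX_OP_WAIT_UINT_PRIVATE operation of sys_umtx_op.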
func futexsleep1(addr *uint32, val uint32, ns int64) {
	var utp *umtx_time
	if ns >= 0 {
		var ut umtx_time
		ut._clockid = _CLOCK_MONOTONIC
		ut._timeout.set_sec(int64(timediv(ns, 1000000000, (*int32)(unsafe.Pointer(&ut._timeout.tv_nsec)))))
		utp = &ut
	}
	ret := sys_umtx_op(addr, _UMTX_OP_WAIT_UINT_PRIVATE, val, unsafe.Sizeof(*utp), utp)
	if ret >= 0 || ret == -_EINTR {
		return
	}
	print("umtx_wait addr=", addr, " val=", val, " ret=", ret, "\n")
	// Deliberately crash on an unexpected error from umtx_op.
	*(*int32)(unsafe.Pointer(uintptr(0x1005))) = 0x1005
}

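// futexwakeup wakes up to cnt threads sleeping on addr via the
// _UMTX_OP_WAKE_PRIVATE operation.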
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
	ret := sys_umtx_op(addr, _UMTX_OP_WAKE_PRIVATE, cnt, 0, nil)
	if ret >= 0 {
		return
	}

	systemstack(func() {
		print("umtx_wake addr=", addr, " ret=", ret, "\n")
	})
}

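// thr_start is the entry point for threads created by thr_new; it is
// implemented in assembly.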
func thr_start()

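// newosproc creates a new OS thread for mp via thr_new, starting execution
// at thr_start.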
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newosproc(mp *m, stk unsafe.Pointer) {
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", funcPC(thr_start), " id=", mp.id, " ostk=", &mp, "\n")
	}

	// NOTE(rsc): This code is confused. stackbase is the top of the stack
	// and is equal to stk. However, it's working, so I'm not changing it.
	param := thrparam{
		start_func: funcPC(thr_start),
		arg:        unsafe.Pointer(mp),
		stack_base: mp.g0.stack.hi,
		stack_size: uintptr(stk) - mp.g0.stack.hi,
		child_tid:  unsafe.Pointer(&mp.procid),
		parent_tid: nil,
		tls_base:   unsafe.Pointer(&mp.tls[0]),
		tls_size:   unsafe.Sizeof(mp.tls),
	}

	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	// TODO: Check for error.
	thr_new(&param, int32(unsafe.Sizeof(param)))
	sigprocmask(_SIG_SETMASK, &oset, nil)
}

func osinit() {
	ncpu = getncpu()
	physPageSize = getPageSize()
}

var urandom_dev = []byte("/dev/urandom\x00")

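// getRandomData fills r with random bytes read from /dev/urandom.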
//go:nosplit
func getRandomData(r []byte) {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}

func goenvs() {
	goenvs_unix()
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024)
	mp.gsignal.m = mp
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	// m.procid is a uint64, but thr_new writes a uint32 on 32-bit systems.
	// Fix it up. (Only matters on big-endian, but be clean anyway.)
	if sys.PtrSize == 4 {
		_g_ := getg()
		_g_.m.procid = uint64(*(*uint32)(unsafe.Pointer(&_g_.m.procid)))
	}

	// On FreeBSD before about April 2017 there was a bug such
	// that calling execve from a thread other than the main
	// thread did not reset the signal stack. That would confuse
	// minitSignals, which calls minitSignalStack, which checks
	// whether there is currently a signal stack and uses it if
	// present. To avoid this confusion, explicitly disable the
	// signal stack on the main thread when not running in a
	// library. This can be removed when we are confident that all
	// FreeBSD users are running a patched kernel. See issue #15658.
	if gp := getg(); !isarchive && !islibrary && gp.m == &m0 && gp == gp.m.g0 {
		st := stackt{ss_flags: _SS_DISABLE}
		sigaltstack(&st, nil)
	}

	minitSignals()
}

// Called from dropm to undo the effect of an minit.
//go:nosplit
func unminit() {
	unminitSignals()
}

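// memlimit returns the address-space limit available to the heap, or 0 for
// no limit; it is currently unimplemented and always returns 0 (see the
// TODO below).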
func memlimit() uintptr {
	/*
		TODO: Convert to Go when something actually uses the result.
		Rlimit rl;
		extern byte runtimetext[], runtimeend[];
		uintptr used;

		if(runtimegetrlimit(RLIMIT_AS, &rl) != 0)
			return 0;
		if(rl.rlim_cur >= 0x7fffffff)
			return 0;

		// Estimate our VM footprint excluding the heap.
		// Not an exact science: use size of binary plus
		// some room for thread stacks.
		used = runtimeend - runtimetext + (64<<20);
		if(used >= rl.rlim_cur)
			return 0;

		// If there's not at least 16 MB left, we're probably
		// not going to be able to do much. Treat as no limit.
		rl.rlim_cur -= used;
		if(rl.rlim_cur < (16<<20))
			return 0;

		return rl.rlim_cur - used;
	*/

	return 0
}

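// sigtramp is the assembly trampoline installed as the signal handler; it
// dispatches signals to the Go signal-handling code.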
func sigtramp()

type sigactiont struct {
	sa_handler uintptr
	sa_flags   int32
	sa_mask    sigset
}

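// setsig installs fn as the handler for signal i. If fn is the Go
// sighandler, the sigtramp assembly trampoline is installed instead so the
// signal is routed through it.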
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = sigset_all
	if fn == funcPC(sighandler) {
		fn = funcPC(sigtramp)
	}
	sa.sa_handler = fn
	sigaction(i, &sa, nil)
}

//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	throw("setsigstack")
}

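// getsig returns the current handler for signal i.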
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa sigactiont
	sigaction(i, nil, &sa)
	return sa.sa_handler
}

// setSignalstackSP sets the ss_sp field of a stackt.
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	s.ss_sp = sp
}

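// sigaddset adds signal i to mask. FreeBSD signal numbers start at 1, so
// signal i occupies bit i-1 of the __bits array.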
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	mask.__bits[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}

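// sigdelset removes signal i from mask.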
func sigdelset(mask *sigset, i int) {
	mask.__bits[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}

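// fixsigcode adjusts the signal code reported in the signal context where
// the OS requires it; FreeBSD needs no adjustment, so this is a no-op.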
func (c *sigctxt) fixsigcode(sig uint32) {
}
    353