// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
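
// For example, GOTRACEBACK=crash is cached by setTraceback below as
// 2<<tracebackShift | tracebackAll | tracebackCrash, which decodes here
// as level=2, all=true, crash=true.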

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
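
// argv_index is the Go spelling of the C expression argv[i]: advance the
// base pointer by i machine words and load the *byte stored there. goargs
// below uses it to convert each argument:
//
//	argslice[i] = gostringnocopy(argv_index(argv, i))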

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
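
// On these platforms the kernel places the environment immediately after
// the argument vector:
//
//	argv[0] ... argv[argc-1] NULL env[0] ... env[n-1] NULL
//
// so the environment strings start at index argc+1, which is why the
// loops above index from there until they hit the terminating NULL.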

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
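
// Cas64 has compare-and-swap semantics, performed atomically:
//
//	if *addr == old {
//		*addr = new
//		return true
//	}
//	return false
//
// so the first call above (old == 0, *addr == 42) must fail and the
// second (old == 42) must succeed.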

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
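
// The float checks above rely on IEEE 754 NaN behavior: ^uint64(0) and
// ^uint32(0) (and the variants with the low bit clear) decode to NaN,
// and NaN compares unequal to every value, including itself.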

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace   int32
	cgocheck         int32
	efence           int32
	gccheckmark      int32
	gcpacertrace     int32
	gcshrinkstackoff int32
	gcrescanstacks   int32
	gcstoptheworld   int32
	gctrace          int32
	invalidptr       int32
	sbrk             int32
	scavenge         int32
	scheddetail      int32
	schedtrace       int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
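
// For example, GODEBUG=gctrace=1,schedtrace=1000 sets debug.gctrace to 1
// and debug.schedtrace to 1000. Keys with no matching dbgvars entry, and
// fields with no "=", are silently skipped.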

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting on fatal errors and panics
	// is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
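
// A numeric setting falls through to the default case: GOTRACEBACK=2, for
// instance, yields tracebackAll | 2<<tracebackShift, the same encoding as
// "system".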

// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
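
// For example, timediv(12345*1000000000+54321, 1000000000, &rem) returns
// 12345 with rem = 54321 (the exact case exercised in check above).
// Quotients that do not fit in 31 bits saturate to 0x7fffffff.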

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
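
// acquirem/releasem pin the calling goroutine to its m across a critical
// section, following the usual pattern:
//
//	mp := acquirem()
//	// ... code that must not be preempted ...
//	releasem(mp)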

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}
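
// In the common single-module case this returns one types section and one
// typelinks slice; additional entries appear when modules are added by
// plugins or shared libraries.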

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
    521