// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint
var Sqrt = sqrt

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
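
// Hypothetical usage sketch: a single push/pop round trip through the
// lock-free stack hooks above. The head word and node are globals because
// lfstack packs raw node addresses into the head word; the example names
// are illustrative only.
var (
	exampleLFHead uint64
	exampleLFNode LFNode
)

func exampleLFStackRoundTrip() bool {
	LFStackPush(&exampleLFHead, &exampleLFNode)
	return LFStackPop(&exampleLFHead) == &exampleLFNode
}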

func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}
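
// Hypothetical usage sketch: the mask returned by GCMask has one byte per
// pointer-sized word of the object, nonzero where the word may hold a
// pointer, so a struct whose first field is a pointer reports a nonzero
// leading byte. The type and function names are illustrative only.
func exampleGCMaskUsage() bool {
	type ptrThenScalar struct {
		p *byte
		n uintptr
	}
	mask := GCMask(new(ptrThenScalar))
	return len(mask) > 0 && mask[0] != 0
}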

func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions, and spurious emptiness
	// can lead to underutilization (runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}
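
// Hypothetical usage sketch: the external scheduler tests simply invoke these
// drivers; the iteration count here is arbitrary.
func exampleRunSchedLocalQueueTests() {
	RunSchedLocalQueueTest()
	RunSchedLocalQueueStealTest()
	RunSchedLocalQueueEmptyTest(100)
}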

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)
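
// Hypothetical usage sketch: each exported hash hook takes a seed and returns
// a uintptr hash, mirroring the internal function it aliases; the function
// name is illustrative only.
func exampleHashUsage(s string, b []byte, seed uintptr) (uintptr, uintptr) {
	return StringHash(s, seed), BytesHash(b, seed)
}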

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// GostringW is an entry point for testing.
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}
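
// Hypothetical usage sketch: gostringw decodes UTF-16 up to a NUL terminator,
// so callers must include the trailing zero word.
func exampleGostringW() string {
	return GostringW([]uint16{'G', 'o', 0}) // "Go"
}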

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

var BigEndian = sys.BigEndian

// For benchmarking.

func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}
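
// Hypothetical usage sketch: BenchSetType re-applies the heap bitmap for the
// pointed-to (or sliced) element type n times, so benchmarks feed it either a
// pointer or a slice of a pointer-bearing type; the type here is illustrative.
func exampleBenchSetType(n int) {
	type node struct {
		next *node
		data [2]uintptr
	}
	BenchSetType(n, new(node))        // pointer case
	BenchSetType(n, make([]node, 16)) // slice case
}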

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}
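
// Hypothetical usage sketch: crash-oriented tests raise the environment level
// up front so later debug.SetTraceback calls cannot reduce the detail.
func exampleRaiseTracebackEnv() {
	SetTracebackEnv("system")
}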

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}
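
// Hypothetical usage sketch: the two results should agree; a mismatch means
// the cached pagesInUse counter has drifted from the per-span totals.
func examplePagesInUseConsistent() bool {
	pagesInUse, counted := CountPagesInUse()
	return pagesInUse == counted
}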

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
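
// Hypothetical usage sketch: one record with a 2-word header and a one-frame
// stack is written, then drained with a non-blocking read after the buffer is
// closed. The buffer sizes and tag value are arbitrary.
func exampleProfBufRoundTrip() ([]uint64, []unsafe.Pointer, bool) {
	b := NewProfBuf(2, 512, 16) // 2 header words, 512 data words, 16 tag slots
	tag := unsafe.Pointer(new(int))
	b.Write(&tag, nanotime(), []uint64{1, 2}, []uintptr{1})
	b.Close()
	return b.Read(ProfBufNonBlocking)
}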

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}
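
// Hypothetical usage sketch: the span-walking totals are expected to match
// the runtime's own accounting, which is what the external tests assert
// field by field.
func exampleMemStatsConsistent() bool {
	base, slow := ReadMemStatsSlow()
	return base.Alloc == slow.Alloc &&
		base.Mallocs == slow.Mallocs &&
		base.Frees == slow.Frees
}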

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}
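
// Hypothetical usage sketch: the wrapper follows the same lock/unlock pairing
// discipline as sync.RWMutex while exercising the runtime-internal rwmutex.
func exampleRWMutexUsage(rw *RWMutex) {
	rw.RLock()
	rw.RUnlock()
	rw.Lock()
	rw.Unlock()
}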

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}
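
// Hypothetical usage sketch: a hint-free map starts with B == 0 (a single
// bucket) and may leave its bucket array unallocated until the first insert,
// which is what these two hooks let tests observe.
func exampleMapBucketHooks() (buckets int, lazyAlloc bool) {
	m := make(map[int]int)
	return MapBucketsCount(m), MapBucketsPointerIsNil(m)
}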

func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}
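
// Hypothetical usage sketch: while the calling goroutine holds LockOSThread
// the external count is nonzero; the internal count covers locking done by
// the runtime itself.
func exampleLockOSCounts() (external, internal uint32) {
	LockOSThread()
	external, internal = LockOSCounts()
	UnlockOSThread()
	return
}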

//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp(unsafe.Pointer(&stk))
		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}
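
// Hypothetical usage sketch: with a positive depth the traceback is captured
// after nested systemstack switches, and _TraceJumpStack lets it continue
// back onto the calling goroutine's stack; depth and buffer size are
// arbitrary.
func exampleTracebackAcrossSystemstack() int {
	stk := make([]uintptr, 32)
	return TracebackSystemstack(stk, 3)
}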