// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"unsafe"
)

// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile

	// size of bucket hash table
	buckHashSize = 179999

	// max depth of stack to record in bucket
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
type bucket struct {
	next    *bucket
	allnext *bucket
	typ     bucketType // memBucket or blockBucket
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we counted
	// them naively, we would get a skew toward mallocs.
	//
	// Mallocs are accounted in recent stats.
	// Explicit frees are accounted in recent stats.
	// GC frees are accounted in prev stats.
	// After GC prev stats are added to final stats and
	// recent stats are moved into prev stats.
	allocs      uintptr
	frees       uintptr
	alloc_bytes uintptr
	free_bytes  uintptr

	// changes between next-to-last GC and last GC
	prev_allocs      uintptr
	prev_frees       uintptr
	prev_alloc_bytes uintptr
	prev_free_bytes  uintptr

	// changes since last GC
	recent_allocs      uintptr
	recent_frees       uintptr
	recent_alloc_bytes uintptr
	recent_free_bytes  uintptr
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// part of the blocking profile.
type blockRecord struct {
	count  int64
	cycles int64
}

var (
	mbuckets  *bucket // memory profile buckets
	bbuckets  *bucket // blocking profile buckets
	buckhash  *[179999]*bucket
	bucketmem uintptr
)

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	bucketmem += size
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// Return the bucket for stk[0:nstk], allocating new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	if buckhash == nil {
		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
		if buckhash == nil {
			throw("runtime: cannot allocate memory")
		}
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	for b := buckhash[i]; b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size
	b.next = buckhash[i]
	buckhash[i] = b
	if typ == memProfile {
		b.allnext = mbuckets
		mbuckets = b
	} else {
		b.allnext = bbuckets
		bbuckets = b
	}
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

func mprof_GC() {
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		mp.allocs += mp.prev_allocs
		mp.frees += mp.prev_frees
		mp.alloc_bytes += mp.prev_alloc_bytes
		mp.free_bytes += mp.prev_free_bytes

		mp.prev_allocs = mp.recent_allocs
		mp.prev_frees = mp.recent_frees
		mp.prev_alloc_bytes = mp.recent_alloc_bytes
		mp.prev_free_bytes = mp.recent_free_bytes

		mp.recent_allocs = 0
		mp.recent_frees = 0
		mp.recent_alloc_bytes = 0
		mp.recent_free_bytes = 0
	}
}

// Record that a gc just happened: all the 'recent' statistics are now real.
func mProf_GC() {
	lock(&proflock)
	mprof_GC()
	unlock(&proflock)
}

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])
	lock(&proflock)
	b := stkbucket(memProfile, size, stk[:nstk], true)
	mp := b.mp()
	mp.recent_allocs++
	mp.recent_alloc_bytes += size
	unlock(&proflock)

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
	// This reduces potential contention and chances of deadlocks.
	// Since the object must be alive during call to mProf_Malloc,
	// it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr, freed bool) {
	lock(&proflock)
	mp := b.mp()
	if freed {
		mp.recent_frees++
		mp.recent_free_bytes += size
	} else {
		mp.prev_frees++
		mp.prev_free_bytes += size
	}
	unlock(&proflock)
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile.  The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomicstore64(&blockprofilerate, uint64(r))
}
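
// Usage sketch (editorial addition, not part of the runtime): a program that
// wants a blocking profile typically enables it once, early in main, and
// writes the result with runtime/pprof before exiting. The output file name
// and the error handling below are assumptions for illustration.
//
//	package main
//
//	import (
//		"os"
//		"runtime"
//		"runtime/pprof"
//	)
//
//	func main() {
//		runtime.SetBlockProfileRate(1) // record every blocking event
//		defer func() {
//			f, err := os.Create("block.prof") // hypothetical output path
//			if err != nil {
//				return
//			}
//			pprof.Lookup("block").WriteTo(f, 0)
//			f.Close()
//		}()
//		// ... program body ...
//	}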

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}
	rate := int64(atomicload64(&blockprofilerate))
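	// Sampling note (editorial addition): events lasting at least rate
	// cycles are always recorded; shorter events are recorded with
	// probability roughly cycles/rate.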
	if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
		return
	}
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	lock(&proflock)
	b := stkbucket(blockProfile, 0, stk[:nstk], true)
	b.bp().count++
	b.bp().cycles += cycles
	unlock(&proflock)
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value.  Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024
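
// Usage sketch (editorial addition, not part of the runtime): adjust the rate
// once, before any significant allocation, for example at the top of main.
// The value 4096 below is an illustrative choice, not a recommendation.
//
//	func main() {
//		runtime.MemProfileRate = 4096 // sample roughly one allocation per 4 KiB
//		// ... rest of the program ...
//	}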

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	lock(&proflock)
	clear := true
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.alloc_bytes != mp.free_bytes {
			n++
		}
		if mp.allocs != 0 || mp.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate stats as if a GC just happened, and recount buckets.
		mprof_GC()
		mprof_GC()
		n = 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.alloc_bytes != mp.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.alloc_bytes != mp.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&proflock)
	return
}
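
// Usage sketch (editorial addition, not part of the runtime; the helper name
// is hypothetical): callers normally size the record slice with one call and
// retry until the profile fits, since new records can appear between calls.
//
//	func memProfileRecords(inuseZero bool) []runtime.MemProfileRecord {
//		var p []runtime.MemProfileRecord
//		n, ok := runtime.MemProfile(nil, inuseZero)
//		for !ok {
//			// Allocate a little extra in case more records appear.
//			p = make([]runtime.MemProfileRecord, n+50)
//			n, ok = runtime.MemProfile(p, inuseZero)
//		}
//		return p[:n]
//	}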

// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.alloc_bytes)
	r.FreeBytes = int64(mp.free_bytes)
	r.AllocObjects = int64(mp.allocs)
	r.FreeObjects = int64(mp.frees)
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&proflock)
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, uintptr(b.nstk), &b.stk()[0], b.size, mp.allocs, mp.frees)
	}
	unlock(&proflock)
}

// BlockProfileRecord describes blocking events originating
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := bbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := bbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = int64(bp.cycles)
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}
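
// Usage sketch (editorial addition, not part of the runtime): the same
// grow-and-retry pattern shown for MemProfile applies here.
//
//	var p []runtime.BlockProfileRecord
//	n, ok := runtime.BlockProfile(nil)
//	for !ok {
//		p = make([]runtime.BlockProfileRecord, n+50)
//		n, ok = runtime.BlockProfile(p)
//	}
//	// p[:n] now holds the blocking profile records.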

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomicloadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			for s := range mp.createstack {
				p[i].Stack0[s] = uintptr(mp.createstack[s])
			}
			i++
		}
	}
	return
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {

	n = NumGoroutine()
	if n <= len(p) {
		gp := getg()
		stopTheWorld("profile")

		n = NumGoroutine()
		if n <= len(p) {
			ok = true
			r := p
			sp := getcallersp(unsafe.Pointer(&p))
			pc := getcallerpc(unsafe.Pointer(&p))
			systemstack(func() {
				saveg(pc, sp, gp, &r[0])
			})
			r = r[1:]
			for _, gp1 := range allgs {
				if gp1 == gp || readgstatus(gp1) == _Gdead {
					continue
				}
				saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
				r = r[1:]
			}
		}

		startTheWorld()
	}

	return n, ok
}
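
// Usage sketch (editorial addition, not part of the runtime): collect the
// goroutine profile and resolve the sampled program counters with
// runtime.FuncForPC. The extra headroom of 10 records is an assumption to
// tolerate goroutines created between the two calls.
//
//	n, _ := runtime.GoroutineProfile(nil)
//	p := make([]runtime.StackRecord, n+10)
//	if n, ok := runtime.GoroutineProfile(p); ok {
//		for _, r := range p[:n] {
//			for _, pc := range r.Stack() {
//				if f := runtime.FuncForPC(pc); f != nil {
//					println(f.Name())
//				}
//			}
//		}
//	}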

func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp(unsafe.Pointer(&buf))
		pc := getcallerpc(unsafe.Pointer(&buf))
		systemstack(func() {
			g0 := getg()
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}
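
// Usage sketch (editorial addition, not part of the runtime): Stack truncates
// rather than grows, so callers that want complete traces typically double
// the buffer until the result fits, then write it out (os import assumed).
//
//	buf := make([]byte, 1<<16)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			buf = buf[:n]
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}
//	os.Stderr.Write(buf)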

// Tracing of alloc/free/gc.

var tracelock mutex

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", *typ._string, ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc(unsafe.Pointer(&p))
		sp := getcallersp(unsafe.Pointer(&p))
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc(unsafe.Pointer(&p))
	sp := getcallersp(unsafe.Pointer(&p))
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}