// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// max depth of stack to record in bucket
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Buckets are looked up by hashing the call stack into a
// linked-list hash table.
//
// No heap pointers.
//
//go:notinheap
type bucket struct {
	next    *bucket
	allnext *bucket
	typ     bucketType // memProfile or blockProfile (includes mutexProfile)
	hash    uintptr
	size    uintptr
	nstk    uintptr
}
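// For illustration only (not a declaration used by the runtime), the memory
// returned by newBucket for a single bucket is laid out as:
//
//	| bucket header | nstk stack PCs | memRecord or blockRecord |
//
// The stk, mp, and bp methods below compute their pointers from exactly
// these offsets.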

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we would
	// naively count them, we would get a skew toward mallocs.
	//
	// Hence, we delay information to get consistent snapshots as
	// of mark termination. Allocations count toward the next mark
	// termination's snapshot, while sweep frees count toward the
	// previous mark termination's snapshot:
	//
	//              MT          MT          MT          MT
	//             .|         .|         .|         .|
	//          .  |      .  |      .  |      .  |
	//       .     |   .     |   .     |   .     |
	//    .        |.        |.        |.        |
	//
	//       alloc    free
	//               P
	//       C+2         C+1      C
	//
	//                   alloc    free
	//                           P
	//                   C+2         C+1      C
	//
	// Since we can't publish a consistent snapshot until all of
	// the sweep frees are accounted for, we wait until the next
	// mark termination ("MT" above) to publish the previous mark
	// termination's snapshot ("P" above). To do this, allocation
	// and free events are accounted to *future* heap profile
	// cycles ("C+n" above) and we only publish a cycle once all
	// of the events from that cycle are done. Specifically:
	//
	// Mallocs are accounted to cycle C+2.
	// Explicit frees are accounted to cycle C+2.
	// GC frees (done during sweeping) are accounted to cycle C+1.
	//
	// After mark termination, we increment the global heap
	// profile cycle counter and accumulate the stats from cycle C
	// into the active profile.

	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once it's complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. This is a ring buffer
	// indexed by the global heap profile cycle C and stores
	// cycles C, C+1, and C+2. Unlike active, these counts are
	// only for a single cycle; they are not cumulative across
	// cycles.
	//
	// We store cycle C here because there's a window between when
	// C becomes the active cycle and when we've flushed it to
	// active.
	future [3]memRecordCycle
}
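// As an illustrative sketch (not a declaration used by the runtime), for a
// given global cycle C the accounting rules above index the future ring the
// way mProf_Malloc, mProf_Free, and mProf_FlushLocked do below:
//
//	c := mProf.cycle
//	allocs := &mp.future[(c+2)%uint32(len(mp.future))] // mallocs and explicit frees
//	sweeps := &mp.future[(c+1)%uint32(len(mp.future))] // frees found during sweeping
//	flush := &mp.future[c%uint32(len(mp.future))]      // flushed into mp.active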

// A memRecordCycle is the allocation and free counts for a single
// heap profile cycle.
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  int64
	cycles int64
}

var (
	mbuckets  *bucket // memory profile buckets
	bbuckets  *bucket // blocking profile buckets
	xbuckets  *bucket // mutex profile buckets
	buckhash  *[buckHashSize]*bucket
	bucketmem uintptr

	mProf struct {
		// All fields in mProf are protected by proflock.

		// cycle is the global heap profile cycle. This wraps
		// at mProfCycleWrap.
		cycle uint32
		// flushed indicates that future[cycle] in all buckets
		// has been flushed to the active profile.
		flushed bool
	}
)

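// mProfCycleWrap is the modulus at which mProf.cycle wraps. It is kept a
// multiple of len(memRecord{}.future) so that future[cycle%len(future)]
// indexing stays consistent when the counter wraps (see the comment in
// mProf_NextCycle).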
const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	bucketmem += size
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// stkbucket returns the bucket for stk[0:nstk], allocating a new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	if buckhash == nil {
		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
		if buckhash == nil {
			throw("runtime: cannot allocate memory")
		}
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	for b := buckhash[i]; b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size
	b.next = buckhash[i]
	buckhash[i] = b
	if typ == memProfile {
		b.allnext = mbuckets
		mbuckets = b
	} else if typ == mutexProfile {
		b.allnext = xbuckets
		xbuckets = b
	} else {
		b.allnext = bbuckets
		bbuckets = b
	}
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	lock(&proflock)
	// We explicitly wrap mProf.cycle rather than depending on
	// uint wraparound because the memRecord.future ring does not
	// itself wrap at a power of two.
	mProf.cycle = (mProf.cycle + 1) % mProfCycleWrap
	mProf.flushed = false
	unlock(&proflock)
}

// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	lock(&proflock)
	if !mProf.flushed {
		mProf_FlushLocked()
		mProf.flushed = true
	}
	unlock(&proflock)
}

func mProf_FlushLocked() {
	c := mProf.cycle
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush cycle C into the published profile and clear
		// it for reuse.
		mpc := &mp.future[c%uint32(len(mp.future))]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}

// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed. This has the effect of publishing the heap profile
// snapshot as of the last mark termination without advancing the heap
// profile cycle.
func mProf_PostSweep() {
	lock(&proflock)
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next mark termination
	// and so on.
	c := mProf.cycle
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		mpc := &mp.future[(c+1)%uint32(len(mp.future))]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
	unlock(&proflock)
}

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])
	lock(&proflock)
	b := stkbucket(memProfile, size, stk[:nstk], true)
	c := mProf.cycle
	mp := b.mp()
	mpc := &mp.future[(c+2)%uint32(len(mp.future))]
	mpc.allocs++
	mpc.alloc_bytes += size
	unlock(&proflock)

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
	// This reduces potential contention and the chance of deadlock.
	// Since the object must be alive during the call to mProf_Malloc,
	// it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	lock(&proflock)
	c := mProf.cycle
	mp := b.mp()
	mpc := &mp.future[(c+1)%uint32(len(mp.future))]
	mpc.frees++
	mpc.free_bytes += size
	unlock(&proflock)
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
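// A minimal usage sketch from client code (not part of the runtime), which
// assumes the result is read back through the standard runtime/pprof "block"
// profile (imports of os, runtime, and runtime/pprof assumed):
//
//	runtime.SetBlockProfileRate(1) // record every blocking event
//	defer runtime.SetBlockProfileRate(0)
//	// ... run the workload of interest ...
//	pprof.Lookup("block").WriteTo(os.Stderr, 1)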

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}
	if blocksampled(cycles) {
		saveblockevent(cycles, skip+1, blockProfile)
	}
}

func blocksampled(cycles int64) bool {
	rate := int64(atomic.Load64(&blockprofilerate))
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}
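// For example, with blockprofilerate set to 10000 ticks, an event that
// blocked for 2500 ticks is kept with probability of roughly 2500/10000,
// while any event that blocked for 10000 ticks or more is always kept,
// so long blocking events are never lost to sampling.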

func saveblockevent(cycles int64, skip int, which bucketType) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	lock(&proflock)
	b := stkbucket(which, 0, stk[:nstk], true)
	b.bp().count++
	b.bp().cycles += cycles
	unlock(&proflock)
}

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate -1.
// (For n>1 the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}
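// A minimal usage sketch from client code (not part of the runtime), again
// assuming the standard runtime/pprof "mutex" profile as the consumer:
//
//	old := runtime.SetMutexProfileFraction(5) // sample ~1/5 of contention events
//	defer runtime.SetMutexProfileFraction(old)
//	// ... run the workload of interest ...
//	pprof.Lookup("mutex").WriteTo(os.Stderr, 1)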

//go:linkname mutexevent sync.event
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	// TODO(pjw): measure impact of always calling fastrand vs using something
	// like malloc.go:nextSample()
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, skip+1, mutexProfile)
	}
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024
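// For example (client code, not part of the runtime), a program that wants
// to sample every allocation sets the rate once, at the start of main,
// before doing any other work:
//
//	func main() {
//		runtime.MemProfileRate = 1
//		// ... rest of the program ...
//	}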

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	lock(&proflock)
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
	mProf_FlushLocked()
	clear := true
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
			}
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&proflock)
	return
}
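// A sketch of the usual size-then-retry calling pattern for the n, ok
// protocol above (client code, not part of the runtime):
//
//	var p []runtime.MemProfileRecord
//	n, ok := runtime.MemProfile(nil, true)
//	for !ok {
//		// Allocate a little extra in case more records appear in the meantime.
//		p = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(p, true)
//	}
//	p = p[:n]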

// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.active.alloc_bytes)
	r.FreeBytes = int64(mp.active.free_bytes)
	r.AllocObjects = int64(mp.active.allocs)
	r.FreeObjects = int64(mp.active.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&proflock)
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&proflock)
}

// BlockProfileRecord describes blocking events originating
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := bbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := bbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = bp.count
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}

// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the runtime/pprof package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := xbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := xbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	gp := getg()

	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1)
	}

	stopTheWorld("profile")

	n = 1
	for _, gp1 := range allgs {
		if isOK(gp1) {
			n++
		}
	}

	if n <= len(p) {
		ok = true
		r := p

		// Save current goroutine.
		sp := getcallersp(unsafe.Pointer(&p))
		pc := getcallerpc()
		systemstack(func() {
			saveg(pc, sp, gp, &r[0])
		})
		r = r[1:]

		// Save other goroutines.
		for _, gp1 := range allgs {
			if isOK(gp1) {
				if len(r) == 0 {
					// Should be impossible, but better to return a
					// truncated profile than to crash the entire process.
					break
				}
				saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
				r = r[1:]
			}
		}
	}

	startTheWorld()

	return n, ok
}

func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp(unsafe.Pointer(&buf))
		pc := getcallerpc()
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent.
			// GOTRACEBACK is only about crash dumps.
			g0.m.traceback = 1
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}
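// A sketch of how a client can grow the buffer until the whole trace fits,
// since Stack truncates the output when buf is too small (client code, not
// part of the runtime):
//
//	buf := make([]byte, 1<<16)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			buf = buf[:n]
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}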

// Tracing of alloc/free/gc.

var tracelock mutex

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc()
		sp := getcallersp(unsafe.Pointer(&p))
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc()
	sp := getcallersp(unsafe.Pointer(&p))
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}