// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// max depth of stack to record in bucket
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Buckets are looked up by hashing the call stack into a linked-list hash table.
//
// No heap pointers.
//
//go:notinheap
type bucket struct {
	next    *bucket
	allnext *bucket
	typ     bucketType // memProfile, blockProfile, or mutexProfile
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

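// A rough picture of the layout described above, as recovered by the stk,
// mp, and bp accessors below (an illustrative sketch, not a new invariant):
//
//	bucket header | stack ([nstk]uintptr) | memRecord or blockRecord
//
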
// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. If we counted
	// them naively, the profile would be skewed toward mallocs.
	//
	// Mallocs are accounted in recent stats.
	// Explicit frees are accounted in recent stats.
	// GC frees are accounted in prev stats.
	// After GC prev stats are added to final stats and
	// recent stats are moved into prev stats.
	allocs      uintptr
	frees       uintptr
	alloc_bytes uintptr
	free_bytes  uintptr

	// changes between next-to-last GC and last GC
	prev_allocs      uintptr
	prev_frees       uintptr
	prev_alloc_bytes uintptr
	prev_free_bytes  uintptr

	// changes since last GC
	recent_allocs      uintptr
	recent_frees       uintptr
	recent_alloc_bytes uintptr
	recent_free_bytes  uintptr
}

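// For example, an allocation sampled since the last GC is counted in
// recent_allocs; the next GC moves it into prev_allocs, and only the GC
// after that folds it into allocs, by which time any matching free
// (recorded during sweeping into the prev stats) has had a chance to
// catch up.
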
// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  int64
	cycles int64
}

var (
	mbuckets  *bucket // memory profile buckets
	bbuckets  *bucket // blocking profile buckets
	xbuckets  *bucket // mutex profile buckets
	buckhash  *[buckHashSize]*bucket
	bucketmem uintptr
)

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	bucketmem += size
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// stkbucket returns the bucket for stk[0:nstk], allocating a new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	if buckhash == nil {
		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
		if buckhash == nil {
			throw("runtime: cannot allocate memory")
		}
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	for b := buckhash[i]; b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size
	b.next = buckhash[i]
	buckhash[i] = b
	if typ == memProfile {
		b.allnext = mbuckets
		mbuckets = b
	} else if typ == mutexProfile {
		b.allnext = xbuckets
		xbuckets = b
	} else {
		b.allnext = bbuckets
		bbuckets = b
	}
	return b
}

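// eqslice reports whether x and y contain the same PCs in the same order.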
func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

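// mprof_GC rotates each memory profile bucket's stats: prev is folded into
// the published totals and recent becomes the new prev. Must be called with
// proflock held.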
func mprof_GC() {
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		mp.allocs += mp.prev_allocs
		mp.frees += mp.prev_frees
		mp.alloc_bytes += mp.prev_alloc_bytes
		mp.free_bytes += mp.prev_free_bytes

		mp.prev_allocs = mp.recent_allocs
		mp.prev_frees = mp.recent_frees
		mp.prev_alloc_bytes = mp.recent_alloc_bytes
		mp.prev_free_bytes = mp.recent_free_bytes

		mp.recent_allocs = 0
		mp.recent_frees = 0
		mp.recent_alloc_bytes = 0
		mp.recent_free_bytes = 0
	}
}

// Record that a gc just happened: all the 'recent' statistics are now real.
func mProf_GC() {
	lock(&proflock)
	mprof_GC()
	unlock(&proflock)
}

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])
	lock(&proflock)
	b := stkbucket(memProfile, size, stk[:nstk], true)
	mp := b.mp()
	mp.recent_allocs++
	mp.recent_alloc_bytes += size
	unlock(&proflock)

	// setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
	// This reduces potential contention and chances of deadlocks.
	// Since the object must be alive during call to mProf_Malloc,
	// it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	lock(&proflock)
	mp := b.mp()
	mp.prev_frees++
	mp.prev_free_bytes += size
	unlock(&proflock)
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}

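// A minimal caller-side sketch (user code, not part of this file): a test
// that wants every blocking event in its profile might do
//
//	runtime.SetBlockProfileRate(1)
//	defer runtime.SetBlockProfileRate(0)
//
// before exercising the code under test and writing the profile out with
// the runtime/pprof package.

// blockevent records a blocking event that lasted the given number of CPU
// ticks, if the event is sampled.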
func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}
	if blocksampled(cycles) {
		saveblockevent(cycles, skip+1, blockProfile, &blockprofilerate)
	}
}

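// blocksampled reports whether a blocking event of the given duration (in
// CPU ticks) should be recorded: always if the event lasted at least as long
// as the sampling rate, and with probability roughly cycles/rate otherwise.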
func blocksampled(cycles int64) bool {
	rate := int64(atomic.Load64(&blockprofilerate))
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}

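// saveblockevent records cycles of blocking against the bucket for the
// current call stack, skipping skip frames; which selects the blocking or
// mutex profile.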
func saveblockevent(cycles int64, skip int, which bucketType, ratep *uint64) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	lock(&proflock)
	b := stkbucket(which, 0, stk[:nstk], true)
	b.bp().count++
	b.bp().cycles += cycles
	unlock(&proflock)
}

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate -1.
// (For rate > 1, the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}

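// A minimal caller-side sketch (user code, not part of this file): sampling
// roughly one in a thousand contention events for the life of the program
// might look like
//
//	runtime.SetMutexProfileFraction(1000)
//
// with the resulting records then collected via pprof.Lookup("mutex").
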
//go:linkname mutexevent sync.event
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	// TODO(pjw): measure impact of always calling fastrand vs using something
	// like malloc.go:nextSample()
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, skip+1, mutexProfile, &mutexprofilerate)
	}
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024

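// A caller-side sketch (user code, not part of this file): a program that
// wants finer-grained sampling could set the rate once at startup, e.g.
//
//	func main() {
//		runtime.MemProfileRate = 4096 // sample roughly every 4 KiB allocated (illustrative value)
//		// ... rest of the program ...
//	}
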
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	lock(&proflock)
	clear := true
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.alloc_bytes != mp.free_bytes {
			n++
		}
		if mp.allocs != 0 || mp.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate stats as if a GC just happened, and recount buckets.
		mprof_GC()
		mprof_GC()
		n = 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.alloc_bytes != mp.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.alloc_bytes != mp.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&proflock)
	return
}

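// A caller-side sketch of the usual call pattern (user code, not part of
// this file), similar to what runtime/pprof does: ask for the size, grow the
// slice with some headroom, and retry until the records fit.
//
//	var p []runtime.MemProfileRecord
//	n, ok := runtime.MemProfile(nil, true)
//	for {
//		p = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(p, true)
//		if ok {
//			p = p[:n]
//			break
//		}
//	}
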
// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.alloc_bytes)
	r.FreeBytes = int64(mp.free_bytes)
	r.AllocObjects = int64(mp.allocs)
	r.FreeObjects = int64(mp.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(unsafe.Pointer(&r)), funcPC(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

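// iterate_memprof calls fn for every memory profile bucket, passing the
// bucket, its stack depth and first PC, its allocation size, and its
// cumulative allocation and free counts. proflock is held across all calls.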
func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&proflock)
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.allocs, mp.frees)
	}
	unlock(&proflock)
}

// BlockProfileRecord describes blocking events originating
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := bbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := bbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = bp.count
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(unsafe.Pointer(&p)), funcPC(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}

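// A caller-side sketch (user code, not part of this file) using the same
// measure-then-fill pattern as the other profile accessors; if ok comes back
// false, new records appeared in between and the call can simply be retried.
//
//	n, _ := runtime.BlockProfile(nil)
//	p := make([]runtime.BlockProfileRecord, n+10)
//	n, ok := runtime.BlockProfile(p)
//	if ok {
//		p = p[:n]
//	}
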
// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the runtime/pprof package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := xbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := xbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	gp := getg()

	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1)
	}

	stopTheWorld("profile")

	n = 1
	for _, gp1 := range allgs {
		if isOK(gp1) {
			n++
		}
	}

	if n <= len(p) {
		ok = true
		r := p

		// Save current goroutine.
		sp := getcallersp(unsafe.Pointer(&p))
		pc := getcallerpc(unsafe.Pointer(&p))
		systemstack(func() {
			saveg(pc, sp, gp, &r[0])
		})
		r = r[1:]

		// Save other goroutines.
		for _, gp1 := range allgs {
			if isOK(gp1) {
				if len(r) == 0 {
					// Should be impossible, but better to return a
					// truncated profile than to crash the entire process.
					break
				}
				saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
				r = r[1:]
			}
		}
	}

	startTheWorld()

	return n, ok
}

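// saveg records gp's stack, as seen from pc and sp, into r, zero-terminating
// it if the trace is shorter than Stack0.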
func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp(unsafe.Pointer(&buf))
		pc := getcallerpc(unsafe.Pointer(&buf))
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent.
			// GOTRACEBACK is only about crash dumps.
			g0.m.traceback = 1
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}

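// A caller-side sketch (user code, not part of this file): dumping all
// goroutine stacks to stderr, growing the buffer until the trace fits.
//
//	buf := make([]byte, 1<<16)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			os.Stderr.Write(buf[:n])
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}
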
// Tracing of alloc/free/gc.

var tracelock mutex

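// tracealloc prints an allocation trace line for p (size bytes, optionally
// with type typ) followed by the allocating goroutine's stack.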
func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc(unsafe.Pointer(&p))
		sp := getcallersp(unsafe.Pointer(&p))
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc(unsafe.Pointer(&p))
	sp := getcallersp(unsafe.Pointer(&p))
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}