// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !nacl

package pprof

import (
	"bytes"
	"context"
	"fmt"
	"internal/testenv"
	"io"
	"io/ioutil"
	"math/big"
	"os"
	"os/exec"
	"regexp"
	"runtime"
	"runtime/pprof/internal/profile"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

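// cpuHogger runs f repeatedly on *y until dur has elapsed, doing at least 500
// iterations, and stores the final result back into *y so the work cannot be
// optimized away.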
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
	// We only need to get one 100 Hz clock tick, so we've got
	// a large safety buffer.
	// But do at least 500 iterations (which should take about 100ms),
	// otherwise TestCPUProfileMultithreaded can fail if only one
	// thread is scheduled during the testing period.
	t0 := time.Now()
	accum := *y
	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
		accum = f(accum)
	}
	*y = accum
}

var (
	salt1 = 0
	salt2 = 0
)

// The actual CPU hogging function.
// It must not call other functions or access the heap or globals inside the
// loop; otherwise, under the race detector, the samples will be in the race
// runtime.
func cpuHog1(x int) int {
	foo := x
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	return foo
}

func cpuHog2(x int) int {
	foo := x
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 2
		}
	}
	return foo
}

func TestCPUProfile(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.cpuHog1"}, func(dur time.Duration) {
		cpuHogger(cpuHog1, &salt1, dur)
	})
}

func TestCPUProfileMultithreaded(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	testCPUProfile(t, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, &salt1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, &salt2, dur)
		<-c
	})
}

func TestCPUProfileInlining(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})
}

func inlinedCaller(x int) int {
	x = inlinedCallee(x)
	return x
}

func inlinedCallee(x int) int {
	// We could just use cpuHog1, but for loops prevent inlining
	// right now. :(
	foo := x
	i := 0
loop:
	if foo > 0 {
		foo *= foo
	} else {
		foo *= foo + 1
	}
	if i++; i < 1e5 {
		goto loop
	}
	return foo
}

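// parseProfile parses the proto-format profile in valBytes and calls f once
// per sample with the sample's count, call stack, and labels.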
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) {
	p, err := profile.Parse(bytes.NewReader(valBytes))
	if err != nil {
		t.Fatal(err)
	}
	for _, sample := range p.Sample {
		count := uintptr(sample.Value[0])
		f(count, sample.Location, sample.Label)
	}
}

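// testCPUProfile collects a CPU profile while running f and checks, via
// profileOk, that the profile is well formed and contains samples in every
// function named in need. Because the check is timing-dependent, it retries
// with doubled durations up to maxDuration before failing, and on platforms
// with known-flaky profiling it skips rather than fails.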
func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
	switch runtime.GOOS {
	case "darwin":
		switch runtime.GOARCH {
		case "arm", "arm64":
			// nothing
		default:
			out, err := exec.Command("uname", "-a").CombinedOutput()
			if err != nil {
				t.Fatal(err)
			}
			vers := string(out)
			t.Logf("uname -a: %v", vers)
		}
	case "plan9":
		t.Skip("skipping on plan9")
	}

	const maxDuration = 5 * time.Second
	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 200 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for duration <= maxDuration {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if profileOk(t, need, prof, duration) {
			return
		}

		duration *= 2
		if duration <= maxDuration {
			t.Logf("retrying with %s duration", duration)
		}
	}

	switch runtime.GOOS {
	case "darwin", "dragonfly", "netbsd", "solaris":
		t.Skipf("ignoring failure on %s; see golang.org/issue/13841", runtime.GOOS)
	}
	// Ignore the failure if the tests are running in a QEMU-based emulator;
	// QEMU is not perfect at emulating everything.
	// The IN_QEMU environment variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
}

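// contains reports whether slice contains s.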
func contains(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}

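// profileOk parses prof and reports whether it is well formed, contains a
// reasonable number of samples for the given duration, and has at least one
// sample in each function named in need. An entry of the form
// "function;key=value" only counts samples that also carry the matching
// pprof label.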
func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
	ok = true

	// Check that profile is well formed and contains need.
	have := make([]uintptr, len(need))
	var samples uintptr
	var buf bytes.Buffer
	parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		samples += count
		for i, name := range need {
			if semi := strings.Index(name, ";"); semi > -1 {
				kv := strings.SplitN(name[semi+1:], "=", 2)
				if len(kv) != 2 || !contains(labels[kv[0]], kv[1]) {
					continue
				}
				name = name[:semi]
			}
			for _, loc := range stk {
				for _, line := range loc.Line {
					if strings.Contains(line.Function.Name, name) {
						have[i] += count
					}
				}
			}
		}
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some Windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	if len(need) == 0 {
		return ok
	}

	var total uintptr
	for i, name := range need {
		total += have[i]
		t.Logf("%s: %d\n", name, have[i])
	}
	if total == 0 {
		t.Logf("no samples in expected functions")
		ok = false
	}
	// We'd like to check a reasonable minimum, like
	// total / len(have) / smallconstant, but this test is
	// pretty flaky (see bug 7095). So we'll just test to
	// make sure we got at least one sample.
	min := uintptr(1)
	for i, name := range need {
		if have[i] < min {
			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
			ok = false
		}
	}
	return ok
}

// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	for i := 0; i < 10; i++ {
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}

// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	// How much to try. These defaults take about 1 second
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read the profile to look for entries for runtime.gogo with an attempt
		// at a traceback. Special two-frame entries that record a PC without a
		// traceback (e.g. runtime._System) are allowed; they are filtered out
		// in the callback below.
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// Otherwise, we should not see runtime.gogo.
			// The place we'd see it would be the innermost frame.
			name := stk[0].Line[0].Function.Name
			if name == "runtime.gogo" {
				var buf bytes.Buffer
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
			}
		})
	}
}

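// fprintStack writes a one-line rendering of stk to w: each location's
// address followed by its function:line pairs in parentheses.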
func fprintStack(w io.Writer, stk []*profile.Location) {
	for _, loc := range stk {
		fmt.Fprintf(w, " %#x", loc.Address)
		fmt.Fprintf(w, " (")
		for i, line := range loc.Line {
			if i > 0 {
				fmt.Fprintf(w, " ")
			}
			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
		}
		fmt.Fprintf(w, ")")
	}
	fmt.Fprintf(w, "\n")
}

// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, func(duration time.Duration) {
		t := time.After(duration)
		pi := new(big.Int)
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}

func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string
		f    func()
		stk  []string
		re   string
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate block profile
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f()
	}

	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		for _, test := range tests {
			if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})
}

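// stacks returns the stack of function names for every sample in p.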
func stacks(p *profile.Profile) (res [][]string) {
	for _, s := range p.Sample {
		var stk []string
		for _, l := range s.Location {
			for _, line := range l.Line {
				stk = append(stk, line.Function.Name)
			}
		}
		res = append(res, stk)
	}
	return res
}

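// containsStack reports whether any stack in got starts with the frames in want.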
func containsStack(got [][]string, want []string) bool {
	for _, stk := range got {
		if len(stk) < len(want) {
			continue
		}
		for i, f := range want {
			if f != stk[i] {
				break
			}
			if i == len(want)-1 {
				return true
			}
		}
	}
	return false
}

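// blockDelay is how long each block* helper below stays blocked before a
// background goroutine releases it. Each helper exercises one kind of
// blocking event recorded by the block profile.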
const blockDelay = 10 * time.Millisecond

func blockChanRecv() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		c <- true
	}()
	<-c
}

func blockChanSend() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	c <- true
}

func blockChanClose() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		close(c)
	}()
	<-c
}

func blockSelectRecvAsync() {
	const numTries = 3
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		for i := 0; i < numTries; i++ {
			time.Sleep(blockDelay)
			c <- true
		}
	}()
	for i := 0; i < numTries; i++ {
		select {
		case <-c:
		case <-c2:
		}
	}
}

func blockSelectSendSync() {
	c := make(chan bool)
	c2 := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	select {
	case c <- true:
	case c2 <- true:
	}
}

func blockMutex() {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	mu.Lock()
}

func blockCond() {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}

func TestMutexProfile(t *testing.T) {
	// Generate mutex profile

	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileFraction 0, got %d", old)
	}

	blockMutex()

	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 1)
		prof := w.String()
		t.Logf("received profile: %v", prof)

		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
			t.Errorf("Bad profile header:\n%v", prof)
		}
		prof = strings.Trim(prof, "\n")
		lines := strings.Split(prof, "\n")
		if len(lines) != 6 {
			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
		}
		if len(lines) < 6 {
			return
		}
		// Check that the fourth line looks like "35258904 1 @ 0x48288d 0x47cd28 0x458931".
		r2 := `^\d+ 1 @(?: 0x[[:xdigit:]]+)+`
		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[3], r2)
		}
		r3 := "^#.*runtime/pprof.blockMutex.*$"
		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[5], r3)
		}
		t.Log(prof)
	})
	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, want := range [][]string{
			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
		} {
			if !containsStack(stks, want) {
				t.Errorf("No matching stack entry for %+v", want)
			}
		}
	})
}

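// func1 through func4 are distinct functions so that goroutines blocked in
// each of them produce distinct stacks, and therefore distinct counts, in the
// goroutine profile.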
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }

func TestGoroutineCounts(t *testing.T) {
	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
	// desired blocking point.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	c := make(chan int)
	for i := 0; i < 100; i++ {
		switch {
		case i%10 == 0:
			go func1(c)
		case i%2 == 0:
			go func2(c)
		default:
			go func3(c)
		}
		// Let goroutines block on channel
		for j := 0; j < 5; j++ {
			runtime.Gosched()
		}
	}

	var w bytes.Buffer
	goroutineProf := Lookup("goroutine")

	// Check debug profile
	goroutineProf.WriteTo(&w, 1)
	prof := w.String()

	if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
		t.Errorf("expected sorted goroutine counts:\n%s", prof)
	}

	// Check proto profile
	w.Reset()
	goroutineProf.WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Errorf("error parsing protobuf profile: %v", err)
	}
	if err := p.CheckValid(); err != nil {
		t.Errorf("protobuf profile is invalid: %v", err)
	}
	if !containsCounts(p, []int64{50, 40, 10, 1}) {
		t.Errorf("expected count profile to contain goroutines with counts %v, got %v",
			[]int64{50, 40, 10, 1}, p)
	}

	close(c)

	time.Sleep(10 * time.Millisecond) // let goroutines exit
}

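// containsInOrder reports whether all strings in all appear in s, in order
// and without overlapping.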
func containsInOrder(s string, all ...string) bool {
	for _, t := range all {
		i := strings.Index(s, t)
		if i < 0 {
			return false
		}
		s = s[i+len(t):]
	}
	return true
}

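// containsCounts reports whether prof contains, for each value in counts, at
// least as many samples with that exact count (duplicates in counts require
// multiple matching samples).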
func containsCounts(prof *profile.Profile, counts []int64) bool {
	m := make(map[int64]int)
	for _, c := range counts {
		m[c]++
	}
	for _, s := range prof.Sample {
		// The count is the single value in the sample
		if len(s.Value) != 1 {
			return false
		}
		m[s.Value[0]]--
	}
	for _, n := range m {
		if n > 0 {
			return false
		}
	}
	return true
}

// Issue 18836.
func TestEmptyCallStack(t *testing.T) {
	t.Parallel()
	var buf bytes.Buffer
	p := NewProfile("test18836")
	p.Add("foo", 47674)
	p.WriteTo(&buf, 1)
	p.Remove("foo")
	got := buf.String()
	prefix := "test18836 profile: total 1\n"
	if !strings.HasPrefix(got, prefix) {
		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
	}
	lostevent := "lostProfileEvent"
	if !strings.Contains(got, lostevent) {
		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
	}
}

func TestCPUProfileLabel(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(context.Context) {
			cpuHogger(cpuHog1, &salt1, dur)
		})
	})
}

func TestLabelRace(t *testing.T) {
	// Test the race detector annotations for synchronization
	// between setting labels and consuming them from the
	// profile.
	testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
		start := time.Now()
		var wg sync.WaitGroup
		for time.Since(start) < dur {
			var salts [10]int
			for i := 0; i < 10; i++ {
				wg.Add(1)
				go func(j int) {
					Do(context.Background(), Labels("key", "value"), func(context.Context) {
						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
					})
					wg.Done()
				}(i)
			}
			wg.Wait()
		}
	})
}

// Check that there is no deadlock when the program receives SIGPROF while in
// the critical section of a 64-bit atomic operation. This used to happen on
// mips{,le}. See #20146.
func TestAtomicLoadStore64(t *testing.T) {
	f, err := ioutil.TempFile("", "profatomic")
	if err != nil {
		t.Fatalf("TempFile: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := StartCPUProfile(f); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	var flag uint64
	done := make(chan bool, 1)

	go func() {
		for atomic.LoadUint64(&flag) == 0 {
			runtime.Gosched()
		}
		done <- true
	}()
	time.Sleep(50 * time.Millisecond)
	atomic.StoreUint64(&flag, 1)
	<-done
}
    904