// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A nanosecond-precision timestamp and a stack trace are
// captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace; args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvCount             = 45
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting to the end user.
	traceFutileWakeup byte = 128
)
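
// Illustration (an added sketch, not part of the original source): the first
// byte of every event packs the event type in the low 6 bits and the argument
// count in the top 2 bits. For example, a GoSched event carrying one argument
// (the stack id) has the header byte
//
//	traceEvGoSched | 1<<traceArgCountShift // 17 | 64 = 0x51
//
// Events with 3 or more arguments set the count to 3 and follow the header
// with an explicit length byte (see traceEvent below).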

// trace is the global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// Currently this is used only at trace setup and for
	// func/file:line info after the tracing session, so we assume
	// single-threaded access.
	strings   map[string]uint64
	stringSeq uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is the header of a per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is a per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for trace data
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before the traceEvGoInSyscall timestamp.
	// That would lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms; wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call the heap allocator, which will try to emit a trace
	// event while holding the heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// since tracing can be enabled at runtime on production servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.10 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		for i := range timers {
			tb := &timers[i]
			if tb.gp != nil {
				data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
				data = traceAppend(data, uint64(tb.gp.goid))
			}
		}
		// This will emit a bunch of full buffers; we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which the race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
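
// A minimal usage sketch (an added illustration, not part of the original
// source). Most clients should use the runtime/trace package; driving the
// runtime API directly looks roughly like this, with w an assumed io.Writer:
//
//	if err := runtime.StartTrace(); err != nil {
//		return err // tracing was already enabled
//	}
//	go func() {
//		for {
//			data := runtime.ReadTrace()
//			if data == nil {
//				return // tracing stopped and all buffered data was returned
//			}
//			w.Write(data) // consume before the next ReadTrace call
//		}
//	}()
//	// ... run the workload to be traced ...
//	runtime.StopTrace() // returns only after the reader has drained everything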

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees the trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into the queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from the queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to the trace buffer, flushing the buffer if necessary.
// ev is the event type.
// If skip > 0, write the current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}
	buf := (*bufp).ptr()
	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id, and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		(*bufp).set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for the number of arguments.
	// If the number is >= 3, then the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
	traceReleaseBuffer(pid)
}
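
// Wire-format illustration (an added sketch, not part of the original source).
// A GoSched event emitted by traceGoSched (one argument: the stack id)
// occupies, in order:
//
//	[traceEvGoSched | 1<<traceArgCountShift] [tickDiff varint] [stack id varint]
//
// With narg == 3 a length byte follows the header byte, so a parser can skip
// an event's payload without decoding every argument.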

func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf[:])
	} else if gp != nil {
		nstk = gcallers(gp, skip, buf[:])
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto the queue of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// Initialize the buffer for a new batch.
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to trace.strings and returns its id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}
	if id, ok := trace.strings[s]; ok {
		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	// The memory allocation above may trigger tracing and
	// cause a *bufp change. The following code works with *bufp,
	// so there must be no memory allocation or any activity
	// that causes tracing after this point.

	buf := (*bufp).ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		(*bufp).set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)
	buf.varint(uint64(len(s)))
	buf.pos += copy(buf.arr[buf.pos:], s)

	(*bufp).set(buf)
	return id, bufp
}

// traceAppend appends v to buf in little-endian base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
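
// Worked example (an added illustration): v = 300 = 0b1_0010_1100 encodes as
// two bytes, low 7 bits first, with the high bit marking continuation:
//
//	traceAppend(nil, 300) // => []byte{0xAC, 0x02}
//
// 0xAC = 0x80|0x2C carries bits 0-6, and 0x02 carries bits 7-8.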

// varint appends v to buf in little-endian base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns the slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double-check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create a new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
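
// put above is a double-checked lookup: the unlocked find covers the common
// hit path, and the second find under tab.lock prevents duplicate inserts.
// The shape of the pattern, as an added sketch with hypothetical lookup and
// insert helpers:
//
//	if id := lookup(key); id != 0 { // lock-free fast path
//		return id
//	}
//	lock(&mu)
//	if id := lookup(key); id != 0 { // re-check: another writer may have won
//		unlock(&mu)
//		return id
//	}
//	id := insert(key) // published with an atomic store for lock-free readers
//	unlock(&mu)
//	return id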

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}
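
// Typical lifetime of the region allocator, as an added sketch (in this file
// the only user is traceStackTable, which allocates traceStacks in put and
// frees them all at once in dump):
//
//	var a traceAlloc
//	p := a.alloc(32) // carves 32 bytes out of the current 64KB block
//	// ... use p ...
//	a.drop() // frees every block at once; there is no per-object free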

// The following functions write specific events to the trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}
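
// Callers may OR traceFutileWakeup into the event type passed to traceGoPark,
// e.g. (an added illustration):
//
//	traceGoPark(traceEvGoBlockRecv|traceFutileWakeup, 2)
//
// which records a FutileWakeup event and then the plain block event with the
// flag masked off.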

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh timestamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}
   1099