      1 // Copyright 2009 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package runtime
      6 
      7 import (
      8 	"runtime/internal/atomic"
      9 	"runtime/internal/sys"
     10 	"unsafe"
     11 )
     12 
     13 // The code in this file implements stack trace walking for all architectures.
     14 // The most important fact about a given architecture is whether it uses a link register.
     15 // On systems with link registers, the prologue for a non-leaf function stores the
     16 // incoming value of LR at the bottom of the newly allocated stack frame.
     17 // On systems without link registers, the architecture pushes a return PC during
     18 // the call instruction, so the return PC ends up above the stack frame.
     19 // In this file, the return PC is always called LR, no matter how it was found.
     20 //
     21 // To date, the opposite of a link register architecture is an x86 architecture.
     22 // This code may need to change if some other kind of non-link-register
     23 // architecture comes along.
     24 //
     25 // The other important fact is the size of a pointer: on 32-bit systems the LR
     26 // takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes.
     27 // Typically this is ptrSize.
     28 //
     29 // As an exception, amd64p32 has ptrSize == 4 but the CALL instruction still
     30 // stores an 8-byte return PC onto the stack. To accommodate this, we use regSize
     31 // as the size of the architecture-pushed return PC.
     32 //
     33 // usesLR is defined below in terms of minFrameSize, which is defined in
     34 // arch_$GOARCH.go. ptrSize and regSize are defined in runtime/internal/sys.
     35 
     36 const usesLR = sys.MinFrameSize > 0
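
        // For example (an illustrative note, not exhaustive): on x86 (386, amd64)
        // sys.MinFrameSize is 0, so usesLR is false; on link register architectures
        // such as arm and arm64 it is nonzero, because a non-leaf frame reserves a
        // slot for the saved LR, so usesLR is true.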
     37 
     38 var (
     39 	// initialized in tracebackinit
     40 	goexitPC             uintptr
     41 	jmpdeferPC           uintptr
     42 	mcallPC              uintptr
     43 	morestackPC          uintptr
     44 	mstartPC             uintptr
     45 	rt0_goPC             uintptr
     46 	asmcgocallPC         uintptr
     47 	sigpanicPC           uintptr
     48 	runfinqPC            uintptr
     49 	bgsweepPC            uintptr
     50 	forcegchelperPC      uintptr
     51 	timerprocPC          uintptr
     52 	gcBgMarkWorkerPC     uintptr
     53 	systemstack_switchPC uintptr
     54 	systemstackPC        uintptr
     55 	cgocallback_gofuncPC uintptr
     56 	skipPC               uintptr
     57 
     58 	gogoPC uintptr
     59 
     60 	externalthreadhandlerp uintptr // initialized elsewhere
     61 )
     62 
     63 func tracebackinit() {
     64 	// Go variable initialization happens late during runtime startup.
     65 	// Instead of initializing the variables above in the declarations,
     66 	// schedinit calls this function so that the variables are
     67 	// initialized and available earlier in the startup sequence.
     68 	goexitPC = funcPC(goexit)
     69 	jmpdeferPC = funcPC(jmpdefer)
     70 	mcallPC = funcPC(mcall)
     71 	morestackPC = funcPC(morestack)
     72 	mstartPC = funcPC(mstart)
     73 	rt0_goPC = funcPC(rt0_go)
     74 	asmcgocallPC = funcPC(asmcgocall)
     75 	sigpanicPC = funcPC(sigpanic)
     76 	runfinqPC = funcPC(runfinq)
     77 	bgsweepPC = funcPC(bgsweep)
     78 	forcegchelperPC = funcPC(forcegchelper)
     79 	timerprocPC = funcPC(timerproc)
     80 	gcBgMarkWorkerPC = funcPC(gcBgMarkWorker)
     81 	systemstack_switchPC = funcPC(systemstack_switch)
     82 	systemstackPC = funcPC(systemstack)
     83 	cgocallback_gofuncPC = funcPC(cgocallback_gofunc)
     84 	skipPC = funcPC(skipPleaseUseCallersFrames)
     85 
     86 	// used by sigprof handler
     87 	gogoPC = funcPC(gogo)
     88 }
     89 
     90 // Traceback over the deferred function calls.
     91 // Report them like calls that have been invoked but not started executing yet.
     92 func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) {
     93 	var frame stkframe
     94 	for d := gp._defer; d != nil; d = d.link {
     95 		fn := d.fn
     96 		if fn == nil {
     97 			// Defer of nil function. Args don't matter.
     98 			frame.pc = 0
     99 			frame.fn = funcInfo{}
    100 			frame.argp = 0
    101 			frame.arglen = 0
    102 			frame.argmap = nil
    103 		} else {
    104 			frame.pc = fn.fn
    105 			f := findfunc(frame.pc)
    106 			if !f.valid() {
    107 				print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
    108 				throw("unknown pc")
    109 			}
    110 			frame.fn = f
    111 			frame.argp = uintptr(deferArgs(d))
    112 			frame.arglen, frame.argmap = getArgInfo(&frame, f, true, fn)
    113 		}
    114 		frame.continpc = frame.pc
    115 		if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
    116 			return
    117 		}
    118 	}
    119 }
    120 
    121 const sizeofSkipFunction = 256
    122 
    123 // This function is defined in asm.s to be sizeofSkipFunction bytes long.
    124 func skipPleaseUseCallersFrames()
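
        // Illustrative example of the partial-skip encoding used by gentraceback
        // (see its doc comment below): if the first frame recorded in pcbuf had two
        // of its logical (inlined) frames skipped, gentraceback stores
        //
        //	pcbuf[0] = frame.pc
        //	pcbuf[1] = skipPC + 2
        //
        // so that a consumer such as runtime.CallersFrames can account for the
        // partially skipped frame.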
    125 
    126 // Generic traceback. Handles runtime stack prints (pcbuf == nil),
    127 // the runtime.Callers function (pcbuf != nil), as well as the garbage
    128 // collector (callback != nil).  A little clunky to merge these, but avoids
    129 // duplicating the code and all its subtlety.
    130 //
    131 // The skip argument is only valid with pcbuf != nil and counts the number
    132 // of logical frames to skip rather than physical frames (with inlining, a
    133 // PC in pcbuf can represent multiple calls). If a PC is partially skipped
    134 // and max > 1, pcbuf[1] will be runtime.skipPleaseUseCallersFrames+N where
    135 // N indicates the number of logical frames to skip in pcbuf[0].
    136 func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
    137 	if skip > 0 && callback != nil {
    138 		throw("gentraceback callback cannot be used with non-zero skip")
    139 	}
    140 	if goexitPC == 0 {
    141 		throw("gentraceback before goexitPC initialization")
    142 	}
    143 	g := getg()
    144 	if g == gp && g == g.m.curg {
    145 		// The starting sp has been passed in as a uintptr, and the caller may
    146 		// have other uintptr-typed stack references as well.
    147 		// If during one of the calls that got us here or during one of the
    148 		// callbacks below the stack must be grown, all these uintptr references
    149 		// to the stack will not be updated, and gentraceback will continue
    150 		// to inspect the old stack memory, which may no longer be valid.
    151 		// Even if all the variables were updated correctly, it is not clear that
    152 		// we want to expose a traceback that begins on one stack and ends
    153 		// on another stack. That could confuse callers quite a bit.
    154 		// Instead, we require that gentraceback and any other function that
    155 		// accepts an sp for the current goroutine (typically obtained by
    156 		// calling getcallersp) must not run on that goroutine's stack but
    157 		// instead on the g0 stack.
    158 		throw("gentraceback cannot trace user goroutine on its own stack")
    159 	}
    160 	level, _, _ := gotraceback()
    161 
    162 	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
    163 		if gp.syscallsp != 0 {
    164 			pc0 = gp.syscallpc
    165 			sp0 = gp.syscallsp
    166 			if usesLR {
    167 				lr0 = 0
    168 			}
    169 		} else {
    170 			pc0 = gp.sched.pc
    171 			sp0 = gp.sched.sp
    172 			if usesLR {
    173 				lr0 = gp.sched.lr
    174 			}
    175 		}
    176 	}
    177 
    178 	nprint := 0
    179 	var frame stkframe
    180 	frame.pc = pc0
    181 	frame.sp = sp0
    182 	if usesLR {
    183 		frame.lr = lr0
    184 	}
    185 	waspanic := false
    186 	cgoCtxt := gp.cgoCtxt
    187 	printing := pcbuf == nil && callback == nil
    188 	_defer := gp._defer
    189 	elideWrapper := false
    190 
    191 	for _defer != nil && _defer.sp == _NoArgs {
    192 		_defer = _defer.link
    193 	}
    194 
    195 	// If the PC is zero, it's likely a nil function call.
    196 	// Start in the caller's frame.
    197 	if frame.pc == 0 {
    198 		if usesLR {
    199 			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
    200 			frame.lr = 0
    201 		} else {
    202 			frame.pc = uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
    203 			frame.sp += sys.RegSize
    204 		}
    205 	}
    206 
    207 	f := findfunc(frame.pc)
    208 	if !f.valid() {
    209 		if callback != nil || printing {
    210 			print("runtime: unknown pc ", hex(frame.pc), "\n")
    211 			tracebackHexdump(gp.stack, &frame, 0)
    212 		}
    213 		if callback != nil {
    214 			throw("unknown pc")
    215 		}
    216 		return 0
    217 	}
    218 	frame.fn = f
    219 
    220 	var cache pcvalueCache
    221 
    222 	n := 0
    223 	for n < max {
    224 		// Typically:
    225 		//	pc is the PC of the running function.
    226 		//	sp is the stack pointer at that program counter.
    227 		//	fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
    228 		//	stk is the stack containing sp.
    229 		//	The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
    230 		f = frame.fn
    231 		if f.pcsp == 0 {
    232 			// No frame information, must be external function, like race support.
    233 			// See golang.org/issue/13568.
    234 			break
    235 		}
    236 
    237 		// Found an actual function.
    238 		// Derive frame pointer and link register.
    239 		if frame.fp == 0 {
    240 			// We want to jump over the systemstack switch. If we're running on the
    241 			// g0, this systemstack is at the top of the stack.
    242 			// If we're not on g0 or there is no curg, then this is a regular call.
    243 			sp := frame.sp
    244 			if flags&_TraceJumpStack != 0 && f.entry == systemstackPC && gp == g.m.g0 && gp.m.curg != nil {
    245 				sp = gp.m.curg.sched.sp
    246 				frame.sp = sp
    247 				cgoCtxt = gp.m.curg.cgoCtxt
    248 			}
    249 			frame.fp = sp + uintptr(funcspdelta(f, frame.pc, &cache))
    250 			if !usesLR {
    251 				// On x86, call instruction pushes return PC before entering new function.
    252 				frame.fp += sys.RegSize
    253 			}
    254 		}
    255 		var flr funcInfo
    256 		if topofstack(f, gp.m != nil && gp == gp.m.g0) {
    257 			frame.lr = 0
    258 			flr = funcInfo{}
    259 		} else if usesLR && f.entry == jmpdeferPC {
    260 			// jmpdefer modifies SP/LR/PC non-atomically.
    261 			// If a profiling interrupt arrives during jmpdefer,
    262 			// the stack unwind may see a mismatched register set
    263 			// and get confused. Stop if we see PC within jmpdefer
    264 			// to avoid that confusion.
    265 			// See golang.org/issue/8153.
    266 			if callback != nil {
    267 				throw("traceback_arm: found jmpdefer when tracing with callback")
    268 			}
    269 			frame.lr = 0
    270 		} else {
    271 			var lrPtr uintptr
    272 			if usesLR {
    273 				if n == 0 && frame.sp < frame.fp || frame.lr == 0 {
    274 					lrPtr = frame.sp
    275 					frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
    276 				}
    277 			} else {
    278 				if frame.lr == 0 {
    279 					lrPtr = frame.fp - sys.RegSize
    280 					frame.lr = uintptr(*(*sys.Uintreg)(unsafe.Pointer(lrPtr)))
    281 				}
    282 			}
    283 			flr = findfunc(frame.lr)
    284 			if !flr.valid() {
    285 				// This happens if you get a profiling interrupt at just the wrong time.
    286 				// In that context it is okay to stop early.
    287 				// But if callback is set, we're doing a garbage collection and must
    288 				// get everything, so crash loudly.
    289 				doPrint := printing
    290 				if doPrint && gp.m.incgo {
    291 					// We can inject sigpanic
    292 					// calls directly into C code,
    293 					// in which case we'll see a C
    294 					// return PC. Don't complain.
    295 					doPrint = false
    296 				}
    297 				if callback != nil || doPrint {
    298 					print("runtime: unexpected return pc for ", funcname(f), " called from ", hex(frame.lr), "\n")
    299 					tracebackHexdump(gp.stack, &frame, lrPtr)
    300 				}
    301 				if callback != nil {
    302 					throw("unknown caller pc")
    303 				}
    304 			}
    305 		}
    306 
    307 		frame.varp = frame.fp
    308 		if !usesLR {
    309 			// On x86, call instruction pushes return PC before entering new function.
    310 			frame.varp -= sys.RegSize
    311 		}
    312 
    313 		// If framepointer_enabled and there's a frame, then
    314 		// there's a saved bp here.
    315 		if framepointer_enabled && GOARCH == "amd64" && frame.varp > frame.sp {
    316 			frame.varp -= sys.RegSize
    317 		}
    318 
    319 		// Derive size of arguments.
    320 		// Most functions have a fixed-size argument block,
    321 		// so we can use metadata about the function f.
    322 		// Not all, though: there are some variadic functions
    323 		// in package runtime and reflect, and for those we use call-specific
    324 		// metadata recorded by f's caller.
    325 		if callback != nil || printing {
    326 			frame.argp = frame.fp + sys.MinFrameSize
    327 			frame.arglen, frame.argmap = getArgInfo(&frame, f, callback != nil, nil)
    328 		}
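
        		// As an illustrative sketch (amd64-style, no link register, and
        		// ignoring any saved frame pointer), the frame examined here looks
        		// roughly like:
        		//
        		//	+----------------------+  (higher addresses)
        		//	| caller's frame ...   |
        		//	| args for this frame  |  <- frame.argp (frame.fp + MinFrameSize)
        		//	+----------------------+
        		//	| return PC            |  <- frame.varp; locals sit below this slot
        		//	+----------------------+
        		//	| locals / spill space |
        		//	+----------------------+  <- frame.sp  (lower addresses)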
    329 
    330 		// Determine frame's 'continuation PC', where it can continue.
    331 		// Normally this is the return address on the stack, but if sigpanic
    332 		// is immediately below this function on the stack, then the frame
    333 		// stopped executing due to a trap, and frame.pc is probably not
    334 		// a safe point for looking up liveness information. In this panicking case,
    335 		// the function either doesn't return at all (if it has no defers or if the
    336 		// defers do not recover) or it returns from one of the calls to
    337 		// deferproc a second time (if the corresponding deferred func recovers).
    338 		// It suffices to assume that the most recent deferproc is the one that
    339 		// returns; everything live at earlier deferprocs is still live at that one.
    340 		frame.continpc = frame.pc
    341 		if waspanic {
    342 			if _defer != nil && _defer.sp == frame.sp {
    343 				frame.continpc = _defer.pc
    344 			} else {
    345 				frame.continpc = 0
    346 			}
    347 		}
    348 
    349 		// Unwind our local defer stack past this frame.
    350 		for _defer != nil && (_defer.sp == frame.sp || _defer.sp == _NoArgs) {
    351 			_defer = _defer.link
    352 		}
    353 
    354 		if callback != nil {
    355 			if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
    356 				return n
    357 			}
    358 		}
    359 
    360 		if pcbuf != nil {
    361 			if skip == 0 {
    362 				(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = frame.pc
    363 			} else {
    364 				// back up to CALL instruction to read inlining info (same logic as below)
    365 				tracepc := frame.pc
    366 				if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic {
    367 					tracepc--
    368 				}
    369 				inldata := funcdata(f, _FUNCDATA_InlTree)
    370 
    371 				// no inlining info, skip the physical frame
    372 				if inldata == nil {
    373 					skip--
    374 					goto skipped
    375 				}
    376 
    377 				ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache)
    378 				inltree := (*[1 << 20]inlinedCall)(inldata)
    379 				// skip the logical (inlined) frames
    380 				logicalSkipped := 0
    381 				for ix >= 0 && skip > 0 {
    382 					skip--
    383 					logicalSkipped++
    384 					ix = inltree[ix].parent
    385 				}
    386 
    387 				// skip the physical frame if there's more to skip
    388 				if skip > 0 {
    389 					skip--
    390 					goto skipped
    391 				}
    392 
    393 				// now we have a partially skipped frame
    394 				(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = frame.pc
    395 
    396 				// if there's room, pcbuf[1] is a skip PC that encodes the number of skipped frames in pcbuf[0]
    397 				if n+1 < max {
    398 					n++
    399 					skipPC := funcPC(skipPleaseUseCallersFrames) + uintptr(logicalSkipped)
    400 					(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = skipPC
    401 				}
    402 			}
    403 		}
    404 
    405 		if printing {
    406 			// assume skip=0 for printing.
    407 			//
    408 			// Never elide wrappers if we haven't printed
    409 			// any frames. And don't elide wrappers that
    410 			// called panic rather than the wrapped
    411 			// function. Otherwise, leave them out.
    412 			name := funcname(f)
    413 			nextElideWrapper := elideWrapperCalling(name)
    414 			if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, elideWrapper && nprint != 0) {
    415 				// Print during crash.
    416 				//	main(0x1, 0x2, 0x3)
    417 				//		/home/rsc/go/src/runtime/x.go:23 +0xf
    418 				//
    419 				tracepc := frame.pc // back up to CALL instruction for funcline.
    420 				if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic {
    421 					tracepc--
    422 				}
    423 				file, line := funcline(f, tracepc)
    424 				inldata := funcdata(f, _FUNCDATA_InlTree)
    425 				if inldata != nil {
    426 					inltree := (*[1 << 20]inlinedCall)(inldata)
    427 					ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil)
    428 					for ix != -1 {
    429 						name := funcnameFromNameoff(f, inltree[ix].func_)
    430 						print(name, "(...)\n")
    431 						print("\t", file, ":", line, "\n")
    432 
    433 						file = funcfile(f, inltree[ix].file)
    434 						line = inltree[ix].line
    435 						ix = inltree[ix].parent
    436 					}
    437 				}
    438 				if name == "runtime.gopanic" {
    439 					name = "panic"
    440 				}
    441 				print(name, "(")
    442 				argp := (*[100]uintptr)(unsafe.Pointer(frame.argp))
    443 				for i := uintptr(0); i < frame.arglen/sys.PtrSize; i++ {
    444 					if i >= 10 {
    445 						print(", ...")
    446 						break
    447 					}
    448 					if i != 0 {
    449 						print(", ")
    450 					}
    451 					print(hex(argp[i]))
    452 				}
    453 				print(")\n")
    454 				print("\t", file, ":", line)
    455 				if frame.pc > f.entry {
    456 					print(" +", hex(frame.pc-f.entry))
    457 				}
    458 				if g.m.throwing > 0 && gp == g.m.curg || level >= 2 {
    459 					print(" fp=", hex(frame.fp), " sp=", hex(frame.sp), " pc=", hex(frame.pc))
    460 				}
    461 				print("\n")
    462 				nprint++
    463 			}
    464 			elideWrapper = nextElideWrapper
    465 		}
    466 		n++
    467 
    468 	skipped:
    469 		if f.entry == cgocallback_gofuncPC && len(cgoCtxt) > 0 {
    470 			ctxt := cgoCtxt[len(cgoCtxt)-1]
    471 			cgoCtxt = cgoCtxt[:len(cgoCtxt)-1]
    472 
    473 			// skip only applies to Go frames.
    474 			// callback != nil is only used when we only care
    475 			// about Go frames.
    476 			if skip == 0 && callback == nil {
    477 				n = tracebackCgoContext(pcbuf, printing, ctxt, n, max)
    478 			}
    479 		}
    480 
    481 		waspanic = f.entry == sigpanicPC
    482 
    483 		// Do not unwind past the bottom of the stack.
    484 		if !flr.valid() {
    485 			break
    486 		}
    487 
    488 		// Unwind to next frame.
    489 		frame.fn = flr
    490 		frame.pc = frame.lr
    491 		frame.lr = 0
    492 		frame.sp = frame.fp
    493 		frame.fp = 0
    494 		frame.argmap = nil
    495 
    496 		// On link register architectures, sighandler saves the LR on stack
    497 		// before faking a call to sigpanic.
    498 		if usesLR && waspanic {
    499 			x := *(*uintptr)(unsafe.Pointer(frame.sp))
    500 			frame.sp += sys.MinFrameSize
    501 			if GOARCH == "arm64" {
    502 				// arm64 needs 16-byte aligned SP, always
    503 				frame.sp += sys.PtrSize
    504 			}
    505 			f = findfunc(frame.pc)
    506 			frame.fn = f
    507 			if !f.valid() {
    508 				frame.pc = x
    509 			} else if funcspdelta(f, frame.pc, &cache) == 0 {
    510 				frame.lr = x
    511 			}
    512 		}
    513 	}
    514 
    515 	if printing {
    516 		n = nprint
    517 	}
    518 
    519 	// If callback != nil, we're being called to gather stack information during
    520 	// garbage collection or stack growth. In that context, require that we used
    521 	// up the entire defer stack. If not, then there is a bug somewhere and the
    522 	// garbage collection or stack growth may not have seen the correct picture
    523 	// of the stack. Crash now instead of silently executing the garbage collection
    524 	// or stack copy incorrectly and setting up for a mysterious crash later.
    525 	//
    526 	// Note that panic != nil is okay here: there can be leftover panics,
    527 	// because the defers on the panic stack do not nest in frame order as
    528 	// they do on the defer stack. If you have:
    529 	//
    530 	//	frame 1 defers d1
    531 	//	frame 2 defers d2
    532 	//	frame 3 defers d3
    533 	//	frame 4 panics
    534 	//	frame 4's panic starts running defers
    535 	//	frame 5, running d3, defers d4
    536 	//	frame 5 panics
    537 	//	frame 5's panic starts running defers
    538 	//	frame 6, running d4, garbage collects
    539 	//	frame 6, running d2, garbage collects
    540 	//
    541 	// During the execution of d4, the panic stack is d4 -> d3, which
    542 	// is nested properly, and we'll treat frame 3 as resumable, because we
    543 	// can find d3. (And in fact frame 3 is resumable. If d4 recovers
    544 	// and frame 5 continues running d3, d3 can recover and we'll
    545 	// resume execution in (returning from) frame 3.)
    546 	//
    547 	// During the execution of d2, however, the panic stack is d2 -> d3,
    548 	// which is inverted. The scan will match d2 to frame 2 but having
    549 	// d2 on the stack until then means it will not match d3 to frame 3.
    550 	// This is okay: if we're running d2, then all the defers after d2 have
    551 	// completed and their corresponding frames are dead. Not finding d3
    552 	// for frame 3 means we'll set frame 3's continpc == 0, which is correct
    553 	// (frame 3 is dead). At the end of the walk the panic stack can thus
    554 	// contain defers (d3 in this case) for dead frames. The inversion here
    555 	// always indicates a dead frame, and the effect of the inversion on the
    556 	// scan is to hide those dead frames, so the scan is still okay:
    557 	// what's left on the panic stack are exactly (and only) the dead frames.
    558 	//
    559 	// We require callback != nil here because only when callback != nil
    560 	// do we know that gentraceback is being called in a "must be correct"
    561 	// context as opposed to a "best effort" context. The tracebacks with
    562 	// callbacks only happen when everything is stopped nicely.
    563 	// At other times, such as when gathering a stack for a profiling signal
    564 	// or when printing a traceback during a crash, everything may not be
    565 	// stopped nicely, and the stack walk may not be able to complete.
    566 	// It's okay in those situations not to use up the entire defer stack:
    567 	// incomplete information then is still better than nothing.
    568 	if callback != nil && n < max && _defer != nil {
    569 		if _defer != nil {
    570 			print("runtime: g", gp.goid, ": leftover defer sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
    571 		}
    572 		for _defer = gp._defer; _defer != nil; _defer = _defer.link {
    573 			print("\tdefer ", _defer, " sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
    574 		}
    575 		throw("traceback has leftover defers")
    576 	}
    577 
    578 	if callback != nil && n < max && frame.sp != gp.stktopsp {
    579 		print("runtime: g", gp.goid, ": frame.sp=", hex(frame.sp), " top=", hex(gp.stktopsp), "\n")
    580 		print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "] n=", n, " max=", max, "\n")
    581 		throw("traceback did not unwind completely")
    582 	}
    583 
    584 	return n
    585 }
    586 
    587 // reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
    588 // and reflect.methodValue.
    589 type reflectMethodValue struct {
    590 	fn    uintptr
    591 	stack *bitvector // args bitmap
    592 }
    593 
    594 // getArgInfo returns the argument frame information for a call to f
    595 // with call frame frame.
    596 //
    597 // This is used for both actual calls with active stack frames and for
    598 // deferred calls that are not yet executing. If this is an actual
    599 // call, ctxt must be nil (getArgInfo will retrieve what it needs from
    600 // the active stack frame). If this is a deferred call, ctxt must be
    601 // the function object that was deferred.
    602 func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (arglen uintptr, argmap *bitvector) {
    603 	arglen = uintptr(f.args)
    604 	if needArgMap && f.args == _ArgsSizeUnknown {
    605 		// Extract argument bitmaps for reflect stubs from the calls they made to reflect.
    606 		switch funcname(f) {
    607 		case "reflect.makeFuncStub", "reflect.methodValueCall":
    608 			// These take a *reflect.methodValue as their
    609 			// context register.
    610 			var mv *reflectMethodValue
    611 			if ctxt != nil {
    612 				// This is not an actual call, but a
    613 				// deferred call. The function value
    614 				// is itself the *reflect.methodValue.
    615 				mv = (*reflectMethodValue)(unsafe.Pointer(ctxt))
    616 			} else {
    617 				// This is a real call that took the
    618 				// *reflect.methodValue as its context
    619 				// register and immediately saved it
    620 				// to 0(SP). Get the methodValue from
    621 				// 0(SP).
    622 				arg0 := frame.sp + sys.MinFrameSize
    623 				mv = *(**reflectMethodValue)(unsafe.Pointer(arg0))
    624 			}
    625 			if mv.fn != f.entry {
    626 				print("runtime: confused by ", funcname(f), "\n")
    627 				throw("reflect mismatch")
    628 			}
    629 			bv := mv.stack
    630 			arglen = uintptr(bv.n * sys.PtrSize)
    631 			argmap = bv
    632 		}
    633 	}
    634 	return
    635 }
    636 
    637 // tracebackCgoContext handles tracing back a cgo context value, from
    638 // the context argument to setCgoTraceback, for the gentraceback
    639 // function. It returns the new value of n.
    640 func tracebackCgoContext(pcbuf *uintptr, printing bool, ctxt uintptr, n, max int) int {
    641 	var cgoPCs [32]uintptr
    642 	cgoContextPCs(ctxt, cgoPCs[:])
    643 	var arg cgoSymbolizerArg
    644 	anySymbolized := false
    645 	for _, pc := range cgoPCs {
    646 		if pc == 0 || n >= max {
    647 			break
    648 		}
    649 		if pcbuf != nil {
    650 			(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
    651 		}
    652 		if printing {
    653 			if cgoSymbolizer == nil {
    654 				print("non-Go function at pc=", hex(pc), "\n")
    655 			} else {
    656 				c := printOneCgoTraceback(pc, max-n, &arg)
    657 				n += c - 1 // +1 a few lines down
    658 				anySymbolized = true
    659 			}
    660 		}
    661 		n++
    662 	}
    663 	if anySymbolized {
    664 		arg.pc = 0
    665 		callCgoSymbolizer(&arg)
    666 	}
    667 	return n
    668 }
    669 
    670 func printcreatedby(gp *g) {
    671 	// Show what created the goroutine, except for the main goroutine (goid 1).
    672 	pc := gp.gopc
    673 	f := findfunc(pc)
    674 	if f.valid() && showframe(f, gp, false, false) && gp.goid != 1 {
    675 		print("created by ", funcname(f), "\n")
    676 		tracepc := pc // back up to CALL instruction for funcline.
    677 		if pc > f.entry {
    678 			tracepc -= sys.PCQuantum
    679 		}
    680 		file, line := funcline(f, tracepc)
    681 		print("\t", file, ":", line)
    682 		if pc > f.entry {
    683 			print(" +", hex(pc-f.entry))
    684 		}
    685 		print("\n")
    686 	}
    687 }
    688 
    689 func traceback(pc, sp, lr uintptr, gp *g) {
    690 	traceback1(pc, sp, lr, gp, 0)
    691 }
    692 
    693 // tracebacktrap is like traceback but expects that the PC and SP were obtained
    694 // from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp.
    695 // Because they are from a trap instead of from a saved pair,
    696 // the initial PC must not be rewound to the previous instruction.
    697 // (All the saved pairs record a PC that is a return address, so we
    698 // rewind it into the CALL instruction.)
    699 func tracebacktrap(pc, sp, lr uintptr, gp *g) {
    700 	traceback1(pc, sp, lr, gp, _TraceTrap)
    701 }
    702 
    703 func traceback1(pc, sp, lr uintptr, gp *g, flags uint) {
    704 	// If the goroutine is in cgo, and we have a cgo traceback, print that.
    705 	if iscgo && gp.m != nil && gp.m.ncgo > 0 && gp.syscallsp != 0 && gp.m.cgoCallers != nil && gp.m.cgoCallers[0] != 0 {
    706 		// Lock cgoCallers so that a signal handler won't
    707 		// change it, copy the array, reset it, unlock it.
    708 		// We are locked to the thread and are not running
    709 		// concurrently with a signal handler.
    710 		// We just have to stop a signal handler from interrupting
    711 		// in the middle of our copy.
    712 		atomic.Store(&gp.m.cgoCallersUse, 1)
    713 		cgoCallers := *gp.m.cgoCallers
    714 		gp.m.cgoCallers[0] = 0
    715 		atomic.Store(&gp.m.cgoCallersUse, 0)
    716 
    717 		printCgoTraceback(&cgoCallers)
    718 	}
    719 
    720 	var n int
    721 	if readgstatus(gp)&^_Gscan == _Gsyscall {
    722 		// Override registers if blocked in system call.
    723 		pc = gp.syscallpc
    724 		sp = gp.syscallsp
    725 		flags &^= _TraceTrap
    726 	}
    727 	// Print traceback. By default, omits runtime frames.
    728 	// If that means we print nothing at all, repeat forcing all frames printed.
    729 	n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags)
    730 	if n == 0 && (flags&_TraceRuntimeFrames) == 0 {
    731 		n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags|_TraceRuntimeFrames)
    732 	}
    733 	if n == _TracebackMaxFrames {
    734 		print("...additional frames elided...\n")
    735 	}
    736 	printcreatedby(gp)
    737 }
    738 
    739 func callers(skip int, pcbuf []uintptr) int {
    740 	sp := getcallersp(unsafe.Pointer(&skip))
    741 	pc := getcallerpc()
    742 	gp := getg()
    743 	var n int
    744 	systemstack(func() {
    745 		n = gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
    746 	})
    747 	return n
    748 }
    749 
    750 func gcallers(gp *g, skip int, pcbuf []uintptr) int {
    751 	return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
    752 }
    753 
    754 func showframe(f funcInfo, gp *g, firstFrame, elideWrapper bool) bool {
    755 	g := getg()
    756 	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
    757 		return true
    758 	}
    759 	level, _, _ := gotraceback()
    760 	if level > 1 {
    761 		// Show all frames.
    762 		return true
    763 	}
    764 
    765 	if !f.valid() {
    766 		return false
    767 	}
    768 
    769 	if elideWrapper {
    770 		file, _ := funcline(f, f.entry)
    771 		if file == "<autogenerated>" {
    772 			return false
    773 		}
    774 	}
    775 
    776 	name := funcname(f)
    777 
    778 	// Special case: always show runtime.gopanic frame
    779 	// in the middle of a stack trace, so that we can
    780 	// see the boundary between ordinary code and
    781 	// panic-induced deferred code.
    782 	// See golang.org/issue/5832.
    783 	if name == "runtime.gopanic" && !firstFrame {
    784 		return true
    785 	}
    786 
    787 	return contains(name, ".") && (!hasprefix(name, "runtime.") || isExportedRuntime(name))
    788 }
    789 
    790 // isExportedRuntime reports whether name is an exported runtime function.
    791 // It is only for runtime functions, so ASCII A-Z is fine.
    792 func isExportedRuntime(name string) bool {
    793 	const n = len("runtime.")
    794 	return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
    795 }
    796 
    797 // elideWrapperCalling returns whether a wrapper function that called
    798 // function "name" should be elided from stack traces.
    799 func elideWrapperCalling(name string) bool {
    800 	// If the wrapper called a panic function instead of the
    801 	// wrapped function, we want to include it in stacks.
    802 	return !(name == "runtime.gopanic" || name == "runtime.sigpanic" || name == "runtime.panicwrap")
    803 }
    804 
    805 var gStatusStrings = [...]string{
    806 	_Gidle:      "idle",
    807 	_Grunnable:  "runnable",
    808 	_Grunning:   "running",
    809 	_Gsyscall:   "syscall",
    810 	_Gwaiting:   "waiting",
    811 	_Gdead:      "dead",
    812 	_Gcopystack: "copystack",
    813 }
    814 
    815 func goroutineheader(gp *g) {
    816 	gpstatus := readgstatus(gp)
    817 
    818 	isScan := gpstatus&_Gscan != 0
    819 	gpstatus &^= _Gscan // drop the scan bit
    820 
    821 	// Basic string status
    822 	var status string
    823 	if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
    824 		status = gStatusStrings[gpstatus]
    825 	} else {
    826 		status = "???"
    827 	}
    828 
    829 	// Override.
    830 	if gpstatus == _Gwaiting && gp.waitreason != "" {
    831 		status = gp.waitreason
    832 	}
    833 
    834 	// approx time the G is blocked, in minutes
    835 	var waitfor int64
    836 	if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
    837 		waitfor = (nanotime() - gp.waitsince) / 60e9
    838 	}
    839 	print("goroutine ", gp.goid, " [", status)
    840 	if isScan {
    841 		print(" (scan)")
    842 	}
    843 	if waitfor >= 1 {
    844 		print(", ", waitfor, " minutes")
    845 	}
    846 	if gp.lockedm != 0 {
    847 		print(", locked to thread")
    848 	}
    849 	print("]:\n")
    850 }
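
        // For example, goroutineheader prints lines like (illustrative):
        //
        //	goroutine 12 [chan receive, 3 minutes]:
        //	goroutine 1 [running]: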
    851 
    852 func tracebackothers(me *g) {
    853 	level, _, _ := gotraceback()
    854 
    855 	// Show the current goroutine first, if we haven't already.
    856 	g := getg()
    857 	gp := g.m.curg
    858 	if gp != nil && gp != me {
    859 		print("\n")
    860 		goroutineheader(gp)
    861 		traceback(^uintptr(0), ^uintptr(0), 0, gp)
    862 	}
    863 
    864 	lock(&allglock)
    865 	for _, gp := range allgs {
    866 		if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp) && level < 2 {
    867 			continue
    868 		}
    869 		print("\n")
    870 		goroutineheader(gp)
    871 		// Note: gp.m == g.m occurs when tracebackothers is
    872 		// called from a signal handler initiated during a
    873 		// systemstack call. The original G is still in the
    874 		// running state, and we want to print its stack.
    875 		if gp.m != g.m && readgstatus(gp)&^_Gscan == _Grunning {
    876 			print("\tgoroutine running on other thread; stack unavailable\n")
    877 			printcreatedby(gp)
    878 		} else {
    879 			traceback(^uintptr(0), ^uintptr(0), 0, gp)
    880 		}
    881 	}
    882 	unlock(&allglock)
    883 }
    884 
    885 // tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
    886 // for debugging purposes. If the address bad is included in the
    887 // hexdumped range, that address is marked as well.
    888 func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
    889 	const expand = 32 * sys.PtrSize
    890 	const maxExpand = 256 * sys.PtrSize
    891 	// Start around frame.sp.
    892 	lo, hi := frame.sp, frame.sp
    893 	// Expand to include frame.fp.
    894 	if frame.fp != 0 && frame.fp < lo {
    895 		lo = frame.fp
    896 	}
    897 	if frame.fp != 0 && frame.fp > hi {
    898 		hi = frame.fp
    899 	}
    900 	// Expand a bit more.
    901 	lo, hi = lo-expand, hi+expand
    902 	// But don't go too far from frame.sp.
    903 	if lo < frame.sp-maxExpand {
    904 		lo = frame.sp - maxExpand
    905 	}
    906 	if hi > frame.sp+maxExpand {
    907 		hi = frame.sp + maxExpand
    908 	}
    909 	// And don't go outside the stack bounds.
    910 	if lo < stk.lo {
    911 		lo = stk.lo
    912 	}
    913 	if hi > stk.hi {
    914 		hi = stk.hi
    915 	}
    916 
    917 	// Print the hex dump.
    918 	print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.lo), ",", hex(stk.hi), ")\n")
    919 	hexdumpWords(lo, hi, func(p uintptr) byte {
    920 		switch p {
    921 		case frame.fp:
    922 			return '>'
    923 		case frame.sp:
    924 			return '<'
    925 		case bad:
    926 			return '!'
    927 		}
    928 		return 0
    929 	})
    930 }
    931 
    932 // Does f mark the top of a goroutine stack?
    933 func topofstack(f funcInfo, g0 bool) bool {
    934 	pc := f.entry
    935 	return pc == goexitPC ||
    936 		pc == mstartPC ||
    937 		pc == mcallPC ||
    938 		pc == morestackPC ||
    939 		pc == rt0_goPC ||
    940 		externalthreadhandlerp != 0 && pc == externalthreadhandlerp ||
    941 		// asmcgocall is TOS on the system stack because it
    942 		// switches to the system stack, but in this case we
    943 		// can come back to the regular stack and still want
    944 		// to be able to unwind through the call that appeared
    945 		// on the regular stack.
    946 		(g0 && pc == asmcgocallPC)
    947 }
    948 
    949 // isSystemGoroutine reports whether the goroutine g must be omitted from
    950 // stack dumps and the deadlock detector.
    951 func isSystemGoroutine(gp *g) bool {
    952 	pc := gp.startpc
    953 	return pc == runfinqPC && !fingRunning ||
    954 		pc == bgsweepPC ||
    955 		pc == forcegchelperPC ||
    956 		pc == timerprocPC ||
    957 		pc == gcBgMarkWorkerPC
    958 }
    959 
    960 // SetCgoTraceback records three C functions to use to gather
    961 // traceback information from C code and to convert that traceback
    962 // information into symbolic information. These are used when printing
    963 // stack traces for a program that uses cgo.
    964 //
    965 // The traceback and context functions may be called from a signal
    966 // handler, and must therefore use only async-signal safe functions.
    967 // The symbolizer function may be called while the program is
    968 // crashing, and so must be cautious about using memory.  None of the
    969 // functions may call back into Go.
    970 //
    971 // The context function will be called with a single argument, a
    972 // pointer to a struct:
    973 //
    974 //	struct {
    975 //		Context uintptr
    976 //	}
    977 //
    978 // In C syntax, this struct will be
    979 //
    980 //	struct {
    981 //		uintptr_t Context;
    982 //	};
    983 //
    984 // If the Context field is 0, the context function is being called to
    985 // record the current traceback context. It should record in the
    986 // Context field whatever information is needed about the current
    987 // point of execution to later produce a stack trace, probably the
    988 // stack pointer and PC. In this case the context function will be
    989 // called from C code.
    990 //
    991 // If the Context field is not 0, then it is a value returned by a
    992 // previous call to the context function. This case is called when the
    993 // context is no longer needed; that is, when the Go code is returning
    994 // to its C code caller. This permits the context function to release
    995 // any associated resources.
    996 //
    997 // While it would be correct for the context function to record a
    998 // complete stack trace whenever it is called, and simply copy that
    999 // out in the traceback function, in a typical program the context
   1000 // function will be called many times without ever recording a
   1001 // traceback for that context. Recording a complete stack trace in a
   1002 // call to the context function is likely to be inefficient.
   1003 //
   1004 // The traceback function will be called with a single argument, a
   1005 // pointer to a struct:
   1006 //
   1007 //	struct {
   1008 //		Context    uintptr
   1009 //		SigContext uintptr
   1010 //		Buf        *uintptr
   1011 //		Max        uintptr
   1012 //	}
   1013 //
   1014 // In C syntax, this struct will be
   1015 //
   1016 //	struct {
   1017 //		uintptr_t  Context;
   1018 //		uintptr_t  SigContext;
   1019 //		uintptr_t* Buf;
   1020 //		uintptr_t  Max;
   1021 //	};
   1022 //
   1023 // The Context field will be zero to gather a traceback from the
   1024 // current program execution point. In this case, the traceback
   1025 // function will be called from C code.
   1026 //
   1027 // Otherwise Context will be a value previously returned by a call to
   1028 // the context function. The traceback function should gather a stack
   1029 // trace from that saved point in the program execution. The traceback
   1030 // function may be called from an execution thread other than the one
   1031 // that recorded the context, but only when the context is known to be
   1032 // valid and unchanging. The traceback function may also be called
   1033 // deeper in the call stack on the same thread that recorded the
   1034 // context. The traceback function may be called multiple times with
   1035 // the same Context value; it will usually be appropriate to cache the
   1036 // result, if possible, the first time this is called for a specific
   1037 // context value.
   1038 //
   1039 // If the traceback function is called from a signal handler on a Unix
   1040 // system, SigContext will be the signal context argument passed to
   1041 // the signal handler (a C ucontext_t* cast to uintptr_t). This may be
   1042 // used to start tracing at the point where the signal occurred. If
   1043 // the traceback function is not called from a signal handler,
   1044 // SigContext will be zero.
   1045 //
   1046 // Buf is where the traceback information should be stored. It should
   1047 // be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
   1048 // the PC of that function's caller, and so on.  Max is the maximum
   1049 // number of entries to store.  The function should store a zero to
   1050 // indicate the top of the stack, or that the caller is on a different
   1051 // stack, presumably a Go stack.
   1052 //
   1053 // Unlike runtime.Callers, the PC values returned should, when passed
   1054 // to the symbolizer function, return the file/line of the call
   1055 // instruction.  No additional subtraction is required or appropriate.
   1056 //
   1057 // The symbolizer function will be called with a single argument, a
   1058 // pointer to a struct:
   1059 //
   1060 //	struct {
   1061 //		PC      uintptr // program counter to fetch information for
   1062 //		File    *byte   // file name (NUL terminated)
   1063 //		Lineno  uintptr // line number
   1064 //		Func    *byte   // function name (NUL terminated)
   1065 //		Entry   uintptr // function entry point
   1066 //		More    uintptr // set non-zero if more info for this PC
   1067 //		Data    uintptr // unused by runtime, available for function
   1068 //	}
   1069 //
   1070 // In C syntax, this struct will be
   1071 //
   1072 //	struct {
   1073 //		uintptr_t PC;
   1074 //		char*     File;
   1075 //		uintptr_t Lineno;
   1076 //		char*     Func;
   1077 //		uintptr_t Entry;
   1078 //		uintptr_t More;
   1079 //		uintptr_t Data;
   1080 //	};
   1081 //
   1082 // The PC field will be a value returned by a call to the traceback
   1083 // function.
   1084 //
   1085 // The first time the function is called for a particular traceback,
   1086 // all the fields except PC will be 0. The function should fill in the
   1087 // other fields if possible, setting them to 0/nil if the information
   1088 // is not available. The Data field may be used to store any useful
   1089 // information across calls. The More field should be set to non-zero
   1090 // if there is more information for this PC, zero otherwise. If More
   1091 // is set non-zero, the function will be called again with the same
   1092 // PC, and may return different information (this is intended for use
   1093 // with inlined functions). If More is zero, the function will be
   1094 // called with the next PC value in the traceback. When the traceback
   1095 // is complete, the function will be called once more with PC set to
   1096 // zero; this may be used to free any information. Each call will
   1097 // leave the fields of the struct set to the same values they had upon
   1098 // return, except for the PC field when the More field is zero. The
   1099 // function must not keep a copy of the struct pointer between calls.
   1100 //
   1101 // When calling SetCgoTraceback, the version argument is the version
   1102 // number of the structs that the functions expect to receive.
   1103 // Currently this must be zero.
   1104 //
   1105 // The symbolizer function may be nil, in which case the results of
   1106 // the traceback function will be displayed as numbers. If the
   1107 // traceback function is nil, the symbolizer function will never be
   1108 // called. The context function may be nil, in which case the
   1109 // traceback function will only be called with the context field set
   1110 // to zero.  If the context function is nil, then calls from Go to C
   1111 // to Go will not show a traceback for the C portion of the call stack.
   1112 //
   1113 // SetCgoTraceback should be called only once, ideally from an init function.
   1114 func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) {
   1115 	if version != 0 {
   1116 		panic("unsupported version")
   1117 	}
   1118 
   1119 	if cgoTraceback != nil && cgoTraceback != traceback ||
   1120 		cgoContext != nil && cgoContext != context ||
   1121 		cgoSymbolizer != nil && cgoSymbolizer != symbolizer {
   1122 		panic("call SetCgoTraceback only once")
   1123 	}
   1124 
   1125 	cgoTraceback = traceback
   1126 	cgoContext = context
   1127 	cgoSymbolizer = symbolizer
   1128 
   1129 	// The context function is called when a C function calls a Go
   1130 	// function. As such it is only called by C code in runtime/cgo.
   1131 	if _cgo_set_context_function != nil {
   1132 		cgocall(_cgo_set_context_function, context)
   1133 	}
   1134 }
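
        // As an illustrative sketch (not part of this package): a cgo-using program
        // could register its callbacks from an init function, assuming it defines C
        // functions tcTraceback, tcContext, and tcSymbolizer with the signatures
        // described above:
        //
        //	func init() {
        //		runtime.SetCgoTraceback(0,
        //			unsafe.Pointer(C.tcTraceback),
        //			unsafe.Pointer(C.tcContext),
        //			unsafe.Pointer(C.tcSymbolizer))
        //	}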
   1135 
   1136 var cgoTraceback unsafe.Pointer
   1137 var cgoContext unsafe.Pointer
   1138 var cgoSymbolizer unsafe.Pointer
   1139 
   1140 // cgoTracebackArg is the type passed to cgoTraceback.
   1141 type cgoTracebackArg struct {
   1142 	context    uintptr
   1143 	sigContext uintptr
   1144 	buf        *uintptr
   1145 	max        uintptr
   1146 }
   1147 
   1148 // cgoContextArg is the type passed to the context function.
   1149 type cgoContextArg struct {
   1150 	context uintptr
   1151 }
   1152 
   1153 // cgoSymbolizerArg is the type passed to cgoSymbolizer.
   1154 type cgoSymbolizerArg struct {
   1155 	pc       uintptr
   1156 	file     *byte
   1157 	lineno   uintptr
   1158 	funcName *byte
   1159 	entry    uintptr
   1160 	more     uintptr
   1161 	data     uintptr
   1162 }
   1163 
   1164 // printCgoTraceback prints a traceback of callers.
   1165 func printCgoTraceback(callers *cgoCallers) {
   1166 	if cgoSymbolizer == nil {
   1167 		for _, c := range callers {
   1168 			if c == 0 {
   1169 				break
   1170 			}
   1171 			print("non-Go function at pc=", hex(c), "\n")
   1172 		}
   1173 		return
   1174 	}
   1175 
   1176 	var arg cgoSymbolizerArg
   1177 	for _, c := range callers {
   1178 		if c == 0 {
   1179 			break
   1180 		}
   1181 		printOneCgoTraceback(c, 0x7fffffff, &arg)
   1182 	}
   1183 	arg.pc = 0
   1184 	callCgoSymbolizer(&arg)
   1185 }
   1186 
   1187 // printOneCgoTraceback prints the traceback of a single cgo caller.
   1188 // This can print more than one line because of inlining.
   1189 // Returns the number of frames printed.
   1190 func printOneCgoTraceback(pc uintptr, max int, arg *cgoSymbolizerArg) int {
   1191 	c := 0
   1192 	arg.pc = pc
   1193 	for {
   1194 		if c > max {
   1195 			break
   1196 		}
   1197 		callCgoSymbolizer(arg)
   1198 		if arg.funcName != nil {
   1199 			// Note that we don't print any argument
   1200 			// information here, not even parentheses.
   1201 			// The symbolizer must add that if appropriate.
   1202 			println(gostringnocopy(arg.funcName))
   1203 		} else {
   1204 			println("non-Go function")
   1205 		}
   1206 		print("\t")
   1207 		if arg.file != nil {
   1208 			print(gostringnocopy(arg.file), ":", arg.lineno, " ")
   1209 		}
   1210 		print("pc=", hex(pc), "\n")
   1211 		c++
   1212 		if arg.more == 0 {
   1213 			break
   1214 		}
   1215 	}
   1216 	return c
   1217 }
   1218 
   1219 // callCgoSymbolizer calls the cgoSymbolizer function.
   1220 func callCgoSymbolizer(arg *cgoSymbolizerArg) {
   1221 	call := cgocall
   1222 	if panicking > 0 || getg().m.curg != getg() {
   1223 		// We do not want to call into the scheduler when panicking
   1224 		// or when on the system stack.
   1225 		call = asmcgocall
   1226 	}
   1227 	if msanenabled {
   1228 		msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
   1229 	}
   1230 	call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
   1231 }
   1232 
   1233 // cgoContextPCs gets the PC values from a cgo traceback.
   1234 func cgoContextPCs(ctxt uintptr, buf []uintptr) {
   1235 	if cgoTraceback == nil {
   1236 		return
   1237 	}
   1238 	call := cgocall
   1239 	if panicking > 0 || getg().m.curg != getg() {
   1240 		// We do not want to call into the scheduler when panicking
   1241 		// or when on the system stack.
   1242 		call = asmcgocall
   1243 	}
   1244 	arg := cgoTracebackArg{
   1245 		context: ctxt,
   1246 		buf:     (*uintptr)(noescape(unsafe.Pointer(&buf[0]))),
   1247 		max:     uintptr(len(buf)),
   1248 	}
   1249 	if msanenabled {
   1250 		msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
   1251 	}
   1252 	call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
   1253 }
   1254