      1 // Copyright 2014 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package runtime
      6 
      7 import (
      8 	"runtime/internal/atomic"
      9 	"runtime/internal/sys"
     10 	"unsafe"
     11 )
     12 
     13 // Frames may be used to get function/file/line information for a
     14 // slice of PC values returned by Callers.
     15 type Frames struct {
     16 	// callers is a slice of PCs that have not yet been expanded.
     17 	callers []uintptr
     18 
     19 	// stackExpander expands callers into a sequence of Frames,
     20 	// tracking the necessary state across PCs.
     21 	stackExpander stackExpander
     22 
     23 	// elideWrapper indicates that, if the next frame is an
     24 	// autogenerated wrapper function, it should be elided from
     25 	// the stack.
     26 	elideWrapper bool
     27 }
     28 
     29 // Frame is the information returned by Frames for each call frame.
     30 type Frame struct {
     31 	// PC is the program counter for the location in this frame.
     32 	// For a frame that calls another frame, this will be the
     33 	// program counter of a call instruction. Because of inlining,
     34 	// multiple frames may have the same PC value, but different
     35 	// symbolic information.
     36 	PC uintptr
     37 
     38 	// Func is the Func value of this call frame. This may be nil
     39 	// for non-Go code or fully inlined functions.
     40 	Func *Func
     41 
     42 	// Function is the package path-qualified function name of
     43 	// this call frame. If non-empty, this string uniquely
     44 	// identifies a single function in the program.
     45 	// This may be the empty string if not known.
     46 	// If Func is not nil then Function == Func.Name().
     47 	Function string
     48 
     49 	// File and Line are the file name and line number of the
     50 	// location in this frame. For non-leaf frames, this will be
     51 	// the location of a call. These may be the empty string and
     52 	// zero, respectively, if not known.
     53 	File string
     54 	Line int
     55 
     56 	// Entry point program counter for the function; may be zero
     57 	// if not known. If Func is not nil then Entry ==
     58 	// Func.Entry().
     59 	Entry uintptr
     60 }
     61 
     62 // stackExpander expands a call stack of PCs into a sequence of
     63 // Frames. It tracks state across PCs necessary to perform this
     64 // expansion.
     65 //
     66 // This is the core of the Frames implementation, but is a separate
     67 // internal API to make it possible to use within the runtime without
      68 // heap-allocating the PC slice. The only difference from the public
      69 // Frames API is that the caller is responsible for threading the PC
      70 // slice between expansion steps in this API. If escape analysis were
      71 // smarter, we might not need this (though it would have to be a lot
      72 // smarter).
     73 type stackExpander struct {
     74 	// pcExpander expands the current PC into a sequence of Frames.
     75 	pcExpander pcExpander
     76 
      77 	// If the previous caller in the iteration was a panic, then the
      78 	// next PC in the call stack is the address of the faulting
      79 	// instruction rather than the return address of the call.
     80 	wasPanic bool
     81 
      82 	// skip > 0 indicates that the first skip frames in the expansion
      83 	// of the first PC should be skipped over, and that callers[1]
      84 	// should also be skipped.
     85 	skip int
     86 }
     87 
     88 // CallersFrames takes a slice of PC values returned by Callers and
     89 // prepares to return function/file/line information.
     90 // Do not change the slice until you are done with the Frames.
     91 func CallersFrames(callers []uintptr) *Frames {
     92 	ci := &Frames{}
     93 	ci.callers = ci.stackExpander.init(callers)
     94 	return ci
     95 }
     96 
     97 func (se *stackExpander) init(callers []uintptr) []uintptr {
     98 	if len(callers) >= 1 {
     99 		pc := callers[0]
    100 		s := pc - skipPC
    101 		if s >= 0 && s < sizeofSkipFunction {
    102 			// Ignore skip frame callers[0] since this means the caller trimmed the PC slice.
    103 			return callers[1:]
    104 		}
    105 	}
    106 	if len(callers) >= 2 {
    107 		pc := callers[1]
    108 		s := pc - skipPC
    109 		if s > 0 && s < sizeofSkipFunction {
    110 			// Skip the first s inlined frames when we expand the first PC.
    111 			se.skip = int(s)
    112 		}
    113 	}
    114 	return callers
    115 }
    116 
    117 // Next returns frame information for the next caller.
    118 // If more is false, there are no more callers (the Frame value is valid).
    119 func (ci *Frames) Next() (frame Frame, more bool) {
    120 	ci.callers, frame, more = ci.stackExpander.next(ci.callers, ci.elideWrapper)
    121 	ci.elideWrapper = elideWrapperCalling(frame.Function)
    122 	return
    123 }
    124 
    125 func (se *stackExpander) next(callers []uintptr, elideWrapper bool) (ncallers []uintptr, frame Frame, more bool) {
    126 	ncallers = callers
    127 again:
    128 	if !se.pcExpander.more {
    129 		// Expand the next PC.
    130 		if len(ncallers) == 0 {
    131 			se.wasPanic = false
    132 			return ncallers, Frame{}, false
    133 		}
    134 		se.pcExpander.init(ncallers[0], se.wasPanic)
    135 		ncallers = ncallers[1:]
    136 		se.wasPanic = se.pcExpander.funcInfo.valid() && se.pcExpander.funcInfo.entry == sigpanicPC
    137 		if se.skip > 0 {
    138 			for ; se.skip > 0; se.skip-- {
    139 				se.pcExpander.next()
    140 			}
    141 			se.skip = 0
    142 			// Drop skipPleaseUseCallersFrames.
    143 			ncallers = ncallers[1:]
    144 		}
    145 		if !se.pcExpander.more {
    146 			// No symbolic information for this PC.
    147 			// However, we return at least one frame for
    148 			// every PC, so return an invalid frame.
    149 			return ncallers, Frame{}, len(ncallers) > 0
    150 		}
    151 	}
    152 
    153 	frame = se.pcExpander.next()
    154 	if elideWrapper && frame.File == "<autogenerated>" {
    155 		// Ignore autogenerated functions such as pointer
    156 		// method forwarding functions. These are an
    157 		// implementation detail that doesn't reflect the
    158 		// source code.
    159 		goto again
    160 	}
    161 	return ncallers, frame, se.pcExpander.more || len(ncallers) > 0
    162 }
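// As a concrete illustration of the elision above: for a value-receiver
// method T.M, the compiler may emit a forwarding wrapper (*T).M whose
// position information reports the file "<autogenerated>"; dropping such
// frames keeps tracebacks limited to calls that appear in the user's source.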
    163 
    164 // A pcExpander expands a single PC into a sequence of Frames.
    165 type pcExpander struct {
    166 	// more indicates that the next call to next will return a
    167 	// valid frame.
    168 	more bool
    169 
    170 	// pc is the pc being expanded.
    171 	pc uintptr
    172 
    173 	// frames is a pre-expanded set of Frames to return from the
    174 	// iterator. If this is set, then this is everything that will
    175 	// be returned from the iterator.
    176 	frames []Frame
    177 
    178 	// funcInfo is the funcInfo of the function containing pc.
    179 	funcInfo funcInfo
    180 
    181 	// inlTree is the inlining tree of the function containing pc.
    182 	inlTree *[1 << 20]inlinedCall
    183 
    184 	// file and line are the file name and line number of the next
    185 	// frame.
    186 	file string
    187 	line int32
    188 
    189 	// inlIndex is the inlining index of the next frame, or -1 if
    190 	// the next frame is an outermost frame.
    191 	inlIndex int32
    192 }
    193 
    194 // init initializes this pcExpander to expand pc. It sets ex.more if
    195 // pc expands to any Frames.
    196 //
    197 // A pcExpander can be reused by calling init again.
    198 //
    199 // If pc was a "call" to sigpanic, panicCall should be true. In this
    200 // case, pc is treated as the address of a faulting instruction
    201 // instead of the return address of a call.
    202 func (ex *pcExpander) init(pc uintptr, panicCall bool) {
    203 	ex.more = false
    204 
    205 	ex.funcInfo = findfunc(pc)
    206 	if !ex.funcInfo.valid() {
    207 		if cgoSymbolizer != nil {
    208 			// Pre-expand cgo frames. We could do this
    209 			// incrementally, too, but there's no way to
    210 			// avoid allocation in this case anyway.
    211 			ex.frames = expandCgoFrames(pc)
    212 			ex.more = len(ex.frames) > 0
    213 		}
    214 		return
    215 	}
    216 
    217 	ex.more = true
    218 	entry := ex.funcInfo.entry
    219 	ex.pc = pc
    220 	if ex.pc > entry && !panicCall {
    221 		ex.pc--
    222 	}
    223 
    224 	// file and line are the innermost position at pc.
    225 	ex.file, ex.line = funcline1(ex.funcInfo, ex.pc, false)
    226 
    227 	// Get inlining tree at pc
    228 	inldata := funcdata(ex.funcInfo, _FUNCDATA_InlTree)
    229 	if inldata != nil {
    230 		ex.inlTree = (*[1 << 20]inlinedCall)(inldata)
    231 		ex.inlIndex = pcdatavalue(ex.funcInfo, _PCDATA_InlTreeIndex, ex.pc, nil)
    232 	} else {
    233 		ex.inlTree = nil
    234 		ex.inlIndex = -1
    235 	}
    236 }
    237 
    238 // next returns the next Frame in the expansion of pc and sets ex.more
    239 // if there are more Frames to follow.
    240 func (ex *pcExpander) next() Frame {
    241 	if !ex.more {
    242 		return Frame{}
    243 	}
    244 
    245 	if len(ex.frames) > 0 {
     246 		// Return pre-expanded frame.
    247 		frame := ex.frames[0]
    248 		ex.frames = ex.frames[1:]
    249 		ex.more = len(ex.frames) > 0
    250 		return frame
    251 	}
    252 
    253 	if ex.inlIndex >= 0 {
    254 		// Return inner inlined frame.
    255 		call := ex.inlTree[ex.inlIndex]
    256 		frame := Frame{
    257 			PC:       ex.pc,
    258 			Func:     nil, // nil for inlined functions
    259 			Function: funcnameFromNameoff(ex.funcInfo, call.func_),
    260 			File:     ex.file,
    261 			Line:     int(ex.line),
    262 			Entry:    ex.funcInfo.entry,
    263 		}
    264 		ex.file = funcfile(ex.funcInfo, call.file)
    265 		ex.line = call.line
    266 		ex.inlIndex = call.parent
    267 		return frame
    268 	}
    269 
    270 	// No inlining or pre-expanded frames.
    271 	ex.more = false
    272 	return Frame{
    273 		PC:       ex.pc,
    274 		Func:     ex.funcInfo._Func(),
    275 		Function: funcname(ex.funcInfo),
    276 		File:     ex.file,
    277 		Line:     int(ex.line),
    278 		Entry:    ex.funcInfo.entry,
    279 	}
    280 }
    281 
     282 // expandCgoFrames expands frame information for pc, known to lie in
     283 // non-Go code, using the cgoSymbolizer hook. expandCgoFrames
     284 // returns nil if pc could not be expanded.
    285 func expandCgoFrames(pc uintptr) []Frame {
    286 	arg := cgoSymbolizerArg{pc: pc}
    287 	callCgoSymbolizer(&arg)
    288 
    289 	if arg.file == nil && arg.funcName == nil {
    290 		// No useful information from symbolizer.
    291 		return nil
    292 	}
    293 
    294 	var frames []Frame
    295 	for {
    296 		frames = append(frames, Frame{
    297 			PC:       pc,
    298 			Func:     nil,
    299 			Function: gostring(arg.funcName),
    300 			File:     gostring(arg.file),
    301 			Line:     int(arg.lineno),
    302 			Entry:    arg.entry,
    303 		})
    304 		if arg.more == 0 {
    305 			break
    306 		}
    307 		callCgoSymbolizer(&arg)
    308 	}
    309 
    310 	// No more frames for this PC. Tell the symbolizer we are done.
    311 	// We don't try to maintain a single cgoSymbolizerArg for the
    312 	// whole use of Frames, because there would be no good way to tell
    313 	// the symbolizer when we are done.
    314 	arg.pc = 0
    315 	callCgoSymbolizer(&arg)
    316 
    317 	return frames
    318 }
    319 
    320 // NOTE: Func does not expose the actual unexported fields, because we return *Func
    321 // values to users, and we want to keep them from being able to overwrite the data
    322 // with (say) *f = Func{}.
    323 // All code operating on a *Func must call raw() to get the *_func
    324 // or funcInfo() to get the funcInfo instead.
    325 
    326 // A Func represents a Go function in the running binary.
    327 type Func struct {
    328 	opaque struct{} // unexported field to disallow conversions
    329 }
    330 
    331 func (f *Func) raw() *_func {
    332 	return (*_func)(unsafe.Pointer(f))
    333 }
    334 
    335 func (f *Func) funcInfo() funcInfo {
    336 	fn := f.raw()
    337 	return funcInfo{fn, findmoduledatap(fn.entry)}
    338 }
    339 
    340 // PCDATA and FUNCDATA table indexes.
    341 //
    342 // See funcdata.h and ../cmd/internal/obj/funcdata.go.
    343 const (
    344 	_PCDATA_StackMapIndex       = 0
    345 	_PCDATA_InlTreeIndex        = 1
    346 	_FUNCDATA_ArgsPointerMaps   = 0
    347 	_FUNCDATA_LocalsPointerMaps = 1
    348 	_FUNCDATA_InlTree           = 2
    349 	_ArgsSizeUnknown            = -0x80000000
    350 )
    351 
    352 // moduledata records information about the layout of the executable
     353 // image. It is written by the linker. Any changes here must be
     354 // matched by changes to the code in cmd/internal/ld/symtab.go:symtab.
    355 // moduledata is stored in statically allocated non-pointer memory;
    356 // none of the pointers here are visible to the garbage collector.
    357 type moduledata struct {
    358 	pclntable    []byte
    359 	ftab         []functab
    360 	filetab      []uint32
    361 	findfunctab  uintptr
    362 	minpc, maxpc uintptr
    363 
    364 	text, etext           uintptr
    365 	noptrdata, enoptrdata uintptr
    366 	data, edata           uintptr
    367 	bss, ebss             uintptr
    368 	noptrbss, enoptrbss   uintptr
    369 	end, gcdata, gcbss    uintptr
    370 	types, etypes         uintptr
    371 
    372 	textsectmap []textsect
    373 	typelinks   []int32 // offsets from types
    374 	itablinks   []*itab
    375 
    376 	ptab []ptabEntry
    377 
    378 	pluginpath string
    379 	pkghashes  []modulehash
    380 
    381 	modulename   string
    382 	modulehashes []modulehash
    383 
    384 	hasmain uint8 // 1 if module contains the main function, 0 otherwise
    385 
    386 	gcdatamask, gcbssmask bitvector
    387 
    388 	typemap map[typeOff]*_type // offset to *_rtype in previous module
    389 
    390 	bad bool // module failed to load and should be ignored
    391 
    392 	next *moduledata
    393 }
    394 
    395 // A modulehash is used to compare the ABI of a new module or a
    396 // package in a new module with the loaded program.
    397 //
    398 // For each shared library a module links against, the linker creates an entry in the
    399 // moduledata.modulehashes slice containing the name of the module, the abi hash seen
    400 // at link time and a pointer to the runtime abi hash. These are checked in
    401 // moduledataverify1 below.
    402 //
    403 // For each loaded plugin, the pkghashes slice has a modulehash of the
    404 // newly loaded package that can be used to check the plugin's version of
    405 // a package against any previously loaded version of the package.
    406 // This is done in plugin.lastmoduleinit.
    407 type modulehash struct {
    408 	modulename   string
    409 	linktimehash string
    410 	runtimehash  *string
    411 }
    412 
    413 // pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
    414 //
    415 // These typemap objects are allocated at run time on the heap, but the
    416 // only direct reference to them is in the moduledata, created by the
    417 // linker and marked SNOPTRDATA so it is ignored by the GC.
    418 //
    419 // To make sure the map isn't collected, we keep a second reference here.
    420 var pinnedTypemaps []map[typeOff]*_type
    421 
    422 var firstmoduledata moduledata  // linker symbol
    423 var lastmoduledatap *moduledata // linker symbol
    424 var modulesSlice *[]*moduledata // see activeModules
    425 
    426 // activeModules returns a slice of active modules.
    427 //
    428 // A module is active once its gcdatamask and gcbssmask have been
    429 // assembled and it is usable by the GC.
    430 //
    431 // This is nosplit/nowritebarrier because it is called by the
    432 // cgo pointer checking code.
    433 //go:nosplit
    434 //go:nowritebarrier
    435 func activeModules() []*moduledata {
    436 	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
    437 	if p == nil {
    438 		return nil
    439 	}
    440 	return *p
    441 }
    442 
    443 // modulesinit creates the active modules slice out of all loaded modules.
    444 //
    445 // When a module is first loaded by the dynamic linker, an .init_array
    446 // function (written by cmd/link) is invoked to call addmoduledata,
     447 // appending the module to the linked list that starts with
    448 // firstmoduledata.
    449 //
    450 // There are two times this can happen in the lifecycle of a Go
    451 // program. First, if compiled with -linkshared, a number of modules
    452 // built with -buildmode=shared can be loaded at program initialization.
    453 // Second, a Go program can load a module while running that was built
    454 // with -buildmode=plugin.
    455 //
     456 // After loading, this function is called to initialize the
     457 // moduledata so it is usable by the GC and to create a new
     458 // activeModules list.
    459 //
    460 // Only one goroutine may call modulesinit at a time.
    461 func modulesinit() {
    462 	modules := new([]*moduledata)
    463 	for md := &firstmoduledata; md != nil; md = md.next {
    464 		if md.bad {
    465 			continue
    466 		}
    467 		*modules = append(*modules, md)
    468 		if md.gcdatamask == (bitvector{}) {
    469 			md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), md.edata-md.data)
    470 			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), md.ebss-md.bss)
    471 		}
    472 	}
    473 
    474 	// Modules appear in the moduledata linked list in the order they are
    475 	// loaded by the dynamic loader, with one exception: the
     476 	// firstmoduledata itself is the module that contains the runtime. This
    477 	// is not always the first module (when using -buildmode=shared, it
    478 	// is typically libstd.so, the second module). The order matters for
    479 	// typelinksinit, so we swap the first module with whatever module
    480 	// contains the main function.
    481 	//
    482 	// See Issue #18729.
    483 	for i, md := range *modules {
    484 		if md.hasmain != 0 {
    485 			(*modules)[0] = md
    486 			(*modules)[i] = &firstmoduledata
    487 			break
    488 		}
    489 	}
    490 
    491 	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
    492 }
    493 
    494 type functab struct {
    495 	entry   uintptr
    496 	funcoff uintptr
    497 }
    498 
    499 // Mapping information for secondary text sections
    500 
    501 type textsect struct {
    502 	vaddr    uintptr // prelinked section vaddr
    503 	length   uintptr // section length
    504 	baseaddr uintptr // relocated section address
    505 }
    506 
    507 const minfunc = 16                 // minimum function size
    508 const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table
    509 
    510 // findfunctab is an array of these structures.
    511 // Each bucket represents 4096 bytes of the text segment.
    512 // Each subbucket represents 256 bytes of the text segment.
    513 // To find a function given a pc, locate the bucket and subbucket for
    514 // that pc. Add together the idx and subbucket value to obtain a
    515 // function index. Then scan the functab array starting at that
    516 // index to find the target function.
    517 // This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
    518 type findfuncbucket struct {
    519 	idx        uint32
    520 	subbuckets [16]byte
    521 }
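// A worked example of the lookup described above (addresses invented for
// illustration): for a pc that lies 10000 bytes past datap.minpc, the bucket
// is 10000/pcbucketsize = 2 and the subbucket is (10000%pcbucketsize)/256 = 7,
// so the scan of ftab starts at index ffb.idx + uint32(ffb.subbuckets[7]) of
// bucket 2, exactly as computed in findfunc below.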
    522 
    523 func moduledataverify() {
    524 	for datap := &firstmoduledata; datap != nil; datap = datap.next {
    525 		moduledataverify1(datap)
    526 	}
    527 }
    528 
    529 const debugPcln = false
    530 
    531 func moduledataverify1(datap *moduledata) {
    532 	// See golang.org/s/go12symtab for header: 0xfffffffb,
    533 	// two zero bytes, a byte giving the PC quantum,
    534 	// and a byte giving the pointer width in bytes.
    535 	pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
    536 	pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
    537 	if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
    538 		println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
    539 		throw("invalid function symbol table\n")
    540 	}
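	// For concreteness (assuming a little-endian amd64 build, where
	// sys.PCQuantum == 1 and sys.PtrSize == 8), a valid pclntable starts
	// with the bytes fb ff ff ff 00 00 01 08.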
    541 
     542 	// ftab is the lookup table for functions by program counter.
    543 	nftab := len(datap.ftab) - 1
    544 	for i := 0; i < nftab; i++ {
    545 		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
    546 		if datap.ftab[i].entry > datap.ftab[i+1].entry {
    547 			f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
    548 			f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
    549 			f2name := "end"
    550 			if i+1 < nftab {
    551 				f2name = funcname(f2)
    552 			}
    553 			println("function symbol table not sorted by program counter:", hex(datap.ftab[i].entry), funcname(f1), ">", hex(datap.ftab[i+1].entry), f2name)
    554 			for j := 0; j <= i; j++ {
    555 				print("\t", hex(datap.ftab[j].entry), " ", funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}), "\n")
    556 			}
    557 			throw("invalid runtime symbol table")
    558 		}
    559 	}
    560 
    561 	if datap.minpc != datap.ftab[0].entry ||
    562 		datap.maxpc != datap.ftab[nftab].entry {
    563 		throw("minpc or maxpc invalid")
    564 	}
    565 
    566 	for _, modulehash := range datap.modulehashes {
    567 		if modulehash.linktimehash != *modulehash.runtimehash {
    568 			println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
    569 			throw("abi mismatch")
    570 		}
    571 	}
    572 }
    573 
    574 // FuncForPC returns a *Func describing the function that contains the
    575 // given program counter address, or else nil.
    576 //
    577 // If pc represents multiple functions because of inlining, it returns
    578 // the *Func describing the outermost function.
    579 func FuncForPC(pc uintptr) *Func {
    580 	return findfunc(pc)._Func()
    581 }
    582 
    583 // Name returns the name of the function.
    584 func (f *Func) Name() string {
    585 	if f == nil {
    586 		return ""
    587 	}
    588 	return funcname(f.funcInfo())
    589 }
    590 
    591 // Entry returns the entry address of the function.
    592 func (f *Func) Entry() uintptr {
    593 	return f.raw().entry
    594 }
    595 
    596 // FileLine returns the file name and line number of the
    597 // source code corresponding to the program counter pc.
    598 // The result will not be accurate if pc is not a program
    599 // counter within f.
    600 func (f *Func) FileLine(pc uintptr) (file string, line int) {
    601 	// Pass strict=false here, because anyone can call this function,
    602 	// and they might just be wrong about targetpc belonging to f.
    603 	file, line32 := funcline1(f.funcInfo(), pc, false)
    604 	return file, int(line32)
    605 }
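// An illustrative sketch, again from the point of view of a client package
// that imports "runtime" (the variable names are arbitrary), of how FuncForPC,
// Name and FileLine fit together:
//
//	pc, _, _, ok := runtime.Caller(0)
//	if ok {
//		f := runtime.FuncForPC(pc)
//		file, line := f.FileLine(pc)
//		println(f.Name(), file, line)
//	}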
    606 
    607 func findmoduledatap(pc uintptr) *moduledata {
    608 	for datap := &firstmoduledata; datap != nil; datap = datap.next {
    609 		if datap.minpc <= pc && pc < datap.maxpc {
    610 			return datap
    611 		}
    612 	}
    613 	return nil
    614 }
    615 
    616 type funcInfo struct {
    617 	*_func
    618 	datap *moduledata
    619 }
    620 
    621 func (f funcInfo) valid() bool {
    622 	return f._func != nil
    623 }
    624 
    625 func (f funcInfo) _Func() *Func {
    626 	return (*Func)(unsafe.Pointer(f._func))
    627 }
    628 
    629 func findfunc(pc uintptr) funcInfo {
    630 	datap := findmoduledatap(pc)
    631 	if datap == nil {
    632 		return funcInfo{}
    633 	}
    634 	const nsub = uintptr(len(findfuncbucket{}.subbuckets))
    635 
    636 	x := pc - datap.minpc
    637 	b := x / pcbucketsize
    638 	i := x % pcbucketsize / (pcbucketsize / nsub)
    639 
    640 	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
    641 	idx := ffb.idx + uint32(ffb.subbuckets[i])
    642 
    643 	// If the idx is beyond the end of the ftab, set it to the end of the table and search backward.
    644 	// This situation can occur if multiple text sections are generated to handle large text sections
    645 	// and the linker has inserted jump tables between them.
    646 
    647 	if idx >= uint32(len(datap.ftab)) {
    648 		idx = uint32(len(datap.ftab) - 1)
    649 	}
    650 	if pc < datap.ftab[idx].entry {
    651 
    652 		// With multiple text sections, the idx might reference a function address that
    653 		// is higher than the pc being searched, so search backward until the matching address is found.
    654 
    655 		for datap.ftab[idx].entry > pc && idx > 0 {
    656 			idx--
    657 		}
    658 		if idx == 0 {
    659 			throw("findfunc: bad findfunctab entry idx")
    660 		}
    661 	} else {
    662 
    663 		// linear search to find func with pc >= entry.
    664 		for datap.ftab[idx+1].entry <= pc {
    665 			idx++
    666 		}
    667 	}
    668 	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[idx].funcoff])), datap}
    669 }
    670 
    671 type pcvalueCache struct {
    672 	entries [16]pcvalueCacheEnt
    673 }
    674 
    675 type pcvalueCacheEnt struct {
    676 	// targetpc and off together are the key of this cache entry.
    677 	targetpc uintptr
    678 	off      int32
    679 	// val is the value of this cached pcvalue entry.
    680 	val int32
    681 }
    682 
    683 func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
    684 	if off == 0 {
    685 		return -1
    686 	}
    687 
    688 	// Check the cache. This speeds up walks of deep stacks, which
    689 	// tend to have the same recursive functions over and over.
    690 	//
    691 	// This cache is small enough that full associativity is
    692 	// cheaper than doing the hashing for a less associative
    693 	// cache.
    694 	if cache != nil {
    695 		for i := range cache.entries {
    696 			// We check off first because we're more
    697 			// likely to have multiple entries with
    698 			// different offsets for the same targetpc
    699 			// than the other way around, so we'll usually
    700 			// fail in the first clause.
    701 			ent := &cache.entries[i]
    702 			if ent.off == off && ent.targetpc == targetpc {
    703 				return ent.val
    704 			}
    705 		}
    706 	}
    707 
    708 	if !f.valid() {
    709 		if strict && panicking == 0 {
    710 			print("runtime: no module data for ", hex(f.entry), "\n")
    711 			throw("no module data")
    712 		}
    713 		return -1
    714 	}
    715 	datap := f.datap
    716 	p := datap.pclntable[off:]
    717 	pc := f.entry
    718 	val := int32(-1)
    719 	for {
    720 		var ok bool
    721 		p, ok = step(p, &pc, &val, pc == f.entry)
    722 		if !ok {
    723 			break
    724 		}
    725 		if targetpc < pc {
    726 			// Replace a random entry in the cache. Random
    727 			// replacement prevents a performance cliff if
    728 			// a recursive stack's cycle is slightly
    729 			// larger than the cache.
    730 			if cache != nil {
    731 				ci := fastrandn(uint32(len(cache.entries)))
    732 				cache.entries[ci] = pcvalueCacheEnt{
    733 					targetpc: targetpc,
    734 					off:      off,
    735 					val:      val,
    736 				}
    737 			}
    738 
    739 			return val
    740 		}
    741 	}
    742 
    743 	// If there was a table, it should have covered all program counters.
    744 	// If not, something is wrong.
    745 	if panicking != 0 || !strict {
    746 		return -1
    747 	}
    748 
    749 	print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")
    750 
    751 	p = datap.pclntable[off:]
    752 	pc = f.entry
    753 	val = -1
    754 	for {
    755 		var ok bool
    756 		p, ok = step(p, &pc, &val, pc == f.entry)
    757 		if !ok {
    758 			break
    759 		}
    760 		print("\tvalue=", val, " until pc=", hex(pc), "\n")
    761 	}
    762 
    763 	throw("invalid runtime symbol table")
    764 	return -1
    765 }
    766 
    767 func cfuncname(f funcInfo) *byte {
    768 	if !f.valid() || f.nameoff == 0 {
    769 		return nil
    770 	}
    771 	return &f.datap.pclntable[f.nameoff]
    772 }
    773 
    774 func funcname(f funcInfo) string {
    775 	return gostringnocopy(cfuncname(f))
    776 }
    777 
    778 func funcnameFromNameoff(f funcInfo, nameoff int32) string {
    779 	datap := f.datap
    780 	if !f.valid() {
    781 		return ""
    782 	}
    783 	cstr := &datap.pclntable[nameoff]
    784 	return gostringnocopy(cstr)
    785 }
    786 
    787 func funcfile(f funcInfo, fileno int32) string {
    788 	datap := f.datap
    789 	if !f.valid() {
    790 		return "?"
    791 	}
    792 	return gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
    793 }
    794 
    795 func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
    796 	datap := f.datap
    797 	if !f.valid() {
    798 		return "?", 0
    799 	}
    800 	fileno := int(pcvalue(f, f.pcfile, targetpc, nil, strict))
    801 	line = pcvalue(f, f.pcln, targetpc, nil, strict)
    802 	if fileno == -1 || line == -1 || fileno >= len(datap.filetab) {
    803 		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
    804 		return "?", 0
    805 	}
    806 	file = gostringnocopy(&datap.pclntable[datap.filetab[fileno]])
    807 	return
    808 }
    809 
    810 func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
    811 	return funcline1(f, targetpc, true)
    812 }
    813 
    814 func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
    815 	x := pcvalue(f, f.pcsp, targetpc, cache, true)
    816 	if x&(sys.PtrSize-1) != 0 {
    817 		print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
    818 	}
    819 	return x
    820 }
    821 
    822 func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32 {
    823 	if table < 0 || table >= f.npcdata {
    824 		return -1
    825 	}
    826 	off := *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
    827 	return pcvalue(f, off, targetpc, cache, true)
    828 }
    829 
    830 func funcdata(f funcInfo, i int32) unsafe.Pointer {
    831 	if i < 0 || i >= f.nfuncdata {
    832 		return nil
    833 	}
    834 	p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
    835 	if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
    836 		if uintptr(unsafe.Pointer(f._func))&4 != 0 {
    837 			println("runtime: misaligned func", f._func)
    838 		}
    839 		p = add(p, 4)
    840 	}
    841 	return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
    842 }
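// Both pcdatavalue and funcdata above rely on the same layout assumption about
// what follows a _func in memory: npcdata int32 pcdata table offsets come
// immediately after the nfuncdata field, and nfuncdata pointer-sized funcdata
// values come after those, padded to pointer alignment on 64-bit systems. So
// funcdata value i lives at
// &f.nfuncdata + sizeof(nfuncdata) + npcdata*4 (+ 4 bytes of padding if needed) + i*sys.PtrSize.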
    843 
    844 // step advances to the next pc, value pair in the encoded table.
    845 func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
    846 	// For both uvdelta and pcdelta, the common case (~70%)
    847 	// is that they are a single byte. If so, avoid calling readvarint.
    848 	uvdelta := uint32(p[0])
    849 	if uvdelta == 0 && !first {
    850 		return nil, false
    851 	}
    852 	n := uint32(1)
    853 	if uvdelta&0x80 != 0 {
    854 		n, uvdelta = readvarint(p)
    855 	}
    856 	p = p[n:]
    857 	if uvdelta&1 != 0 {
    858 		uvdelta = ^(uvdelta >> 1)
    859 	} else {
    860 		uvdelta >>= 1
    861 	}
    862 	vdelta := int32(uvdelta)
    863 	pcdelta := uint32(p[0])
    864 	n = 1
    865 	if pcdelta&0x80 != 0 {
    866 		n, pcdelta = readvarint(p)
    867 	}
    868 	p = p[n:]
    869 	*pc += uintptr(pcdelta * sys.PCQuantum)
    870 	*val += vdelta
    871 	return p, true
    872 }
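// A worked example of the decoding above (byte values chosen for
// illustration): for the table bytes 0x05, 0x02, the first byte has bit 7
// clear, so uvdelta = 5; its low bit is set, so vdelta = ^(5>>1) = -3. The
// second byte gives pcdelta = 2, so pc advances by 2*sys.PCQuantum and val
// decreases by 3.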
    873 
    874 // readvarint reads a varint from p.
    875 func readvarint(p []byte) (read uint32, val uint32) {
    876 	var v, shift, n uint32
    877 	for {
    878 		b := p[n]
    879 		n++
    880 		v |= uint32(b&0x7F) << (shift & 31)
    881 		if b&0x80 == 0 {
    882 			break
    883 		}
    884 		shift += 7
    885 	}
    886 	return n, v
    887 }
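// For example, readvarint([]byte{0x96, 0x01}) returns (2, 150): the first
// byte contributes its low seven bits (0x16 = 22) and has its continuation
// bit set, and the second byte contributes 1<<7 = 128, giving 22+128 = 150.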
    888 
    889 type stackmap struct {
    890 	n        int32   // number of bitmaps
    891 	nbit     int32   // number of bits in each bitmap
    892 	bytedata [1]byte // bitmaps, each starting on a byte boundary
    893 }
    894 
    895 //go:nowritebarrier
    896 func stackmapdata(stkmap *stackmap, n int32) bitvector {
    897 	if n < 0 || n >= stkmap.n {
    898 		throw("stackmapdata: index out of range")
    899 	}
    900 	return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+7)>>3))))}
    901 }
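// For example, with stkmap.nbit = 10 each bitmap occupies (10+7)>>3 = 2 bytes,
// so bitmap n starts 2*n bytes into bytedata and the returned bitvector still
// reports 10 bits.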
    902 
    903 // inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
    904 type inlinedCall struct {
    905 	parent int32 // index of parent in the inltree, or < 0
    906 	file   int32 // fileno index into filetab
    907 	line   int32 // line number of the call site
    908 	func_  int32 // offset into pclntab for name of called function
    909 }
    910