// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Cgo call and callback support.
//
// To call into the C function f from Go, the cgo-generated code calls
// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
// gcc-compiled function written by cgo.
//
// runtime.cgocall (below) locks g to m, calls entersyscall
// so as not to block other goroutines or the garbage collector,
// and then calls runtime.asmcgocall(_cgo_Cfunc_f, frame).
//
// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
// (assumed to be an operating system-allocated stack, so safe to run
// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
//
// _cgo_Cfunc_f invokes the actual C function f with arguments
// taken from the frame structure, records the results in the frame,
// and returns to runtime.asmcgocall.
//
// After it regains control, runtime.asmcgocall switches back to the
// original g (m->curg)'s stack and returns to runtime.cgocall.
//
// After it regains control, runtime.cgocall calls exitsyscall, which blocks
// until this m can run Go code without violating the $GOMAXPROCS limit,
// and then unlocks g from m.
//
// The above description skipped over the possibility of the gcc-compiled
// function f calling back into Go. If that happens, we continue down
// the rabbit hole during the execution of f.
//
// To make it possible for gcc-compiled C code to call a Go function p.GoF,
// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
// know about packages).  The gcc-compiled C function f calls GoF.
//
// GoF calls crosscall2(_cgoexp_GoF, frame, framesize).  Crosscall2
// (in cgo/gcc_$GOARCH.S, a gcc-compiled assembly file) is a two-argument
// adapter from the gcc function call ABI to the 6c function call ABI.
// It is called from gcc to call 6c functions. In this case it calls
// _cgoexp_GoF(frame, framesize), still running on m->g0's stack
// and outside the $GOMAXPROCS limit. Thus, this code cannot yet
// call arbitrary Go code directly and must be careful not to allocate
// memory or use up m->g0's stack.
//
// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize, ctxt).
// (The reason for having _cgoexp_GoF instead of writing a crosscall3
// to make this call directly is that _cgoexp_GoF, because it is compiled
// with 6c instead of gcc, can refer to dotted names like
// runtime.cgocallback and p.GoF.)
//
// runtime.cgocallback (in asm_$GOARCH.s) switches from m->g0's
// stack to the original g (m->curg)'s stack, on which it calls
// runtime.cgocallbackg(p.GoF, frame, framesize).
// As part of the stack switch, runtime.cgocallback saves the current
// SP as m->g0->sched.sp, so that any use of m->g0's stack during the
// execution of the callback will be done below the existing stack frames.
// Before overwriting m->g0->sched.sp, it pushes the old value on the
// m->g0 stack, so that it can be restored later.
//
// runtime.cgocallbackg (below) is now running on a real goroutine
// stack (not an m->g0 stack).  First it calls runtime.exitsyscall, which will
// block until the $GOMAXPROCS limit allows running this goroutine.
// Once exitsyscall has returned, it is safe to do things like call the memory
// allocator or invoke the Go callback function p.GoF.  runtime.cgocallbackg
// first defers a function to unwind m->g0.sched.sp, so that if p.GoF
// panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack
// and the m->curg stack will be unwound in lock step.
// Then it calls p.GoF.  Finally it pops but does not execute the deferred
// function, calls runtime.entersyscall, and returns to runtime.cgocallback.
//
// After it regains control, runtime.cgocallback switches back to
// m->g0's stack (the pointer is still in m->g0.sched.sp), restores the old
// m->g0.sched.sp value from the stack, and returns to _cgoexp_GoF.
//
// _cgoexp_GoF immediately returns to crosscall2, which restores the
// callee-save registers for gcc and returns to GoF, which returns to f.
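//
// As a concrete illustration of the Go-to-C direction (the generated
// names follow the _cgo_Cfunc_f pattern described above; the exact
// stubs and frame layout are cgo-internal details), a program such as
//
//	package main
//
//	// #include <math.h>
//	import "C"
//
//	func main() {
//		println(float64(C.sqrt(4)))
//	}
//
// compiles C.sqrt(4) into a cgo-generated Go stub that packs the
// argument and result into a frame and calls
// runtime.cgocall(_cgo_Cfunc_sqrt, frame), entering the sequence
// described above.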

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Addresses collected in a cgo backtrace when crashing.
// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
type cgoCallers [32]uintptr

// Call from Go to C.
//go:nosplit
func cgocall(fn, arg unsafe.Pointer) int32 {
	if !iscgo && GOOS != "solaris" && GOOS != "windows" {
		throw("cgocall unavailable")
	}

	if fn == nil {
		throw("cgocall nil")
	}

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	// Lock g to m to ensure we stay on the same stack if we do a
	// cgo callback. In case of panic, unwindm calls endcgo.
	lockOSThread()
	mp := getg().m
	mp.ncgocall++
	mp.ncgo++

	// Reset traceback.
	mp.cgoCallers[0] = 0

	// Announce we are entering a system call
	// so that the scheduler knows to create another
	// M to run goroutines while we are in the
	// foreign code.
	//
	// The call to asmcgocall is guaranteed not to
	// grow the stack and does not allocate memory,
	// so it is safe to call while "in a system call", outside
	// the $GOMAXPROCS accounting.
	//
	// fn may call back into Go code, in which case we'll exit the
	// "system call", run the Go code (which may grow the stack),
	// and then re-enter the "system call" reusing the PC and SP
	// saved by entersyscall here.
	entersyscall(0)
	errno := asmcgocall(fn, arg)
	exitsyscall(0)

	// From the garbage collector's perspective, time can move
	// backwards in the sequence above. If there's a callback into
	// Go code, GC will see this function at the call to
	// asmcgocall. When the Go call later returns to C, the
	// syscall PC/SP is rolled back and the GC sees this function
	// back at the call to entersyscall. Normally, fn and arg
	// would be live at entersyscall and dead at asmcgocall, so if
	// time moved backwards, GC would see these arguments as dead
	// and then live. Prevent these undead arguments from crashing
	// GC by forcing them to stay live across this time warp.
	KeepAlive(fn)
	KeepAlive(arg)

	endcgo(mp)
	return errno
}

//go:nosplit
func endcgo(mp *m) {
	mp.ncgo--

	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	unlockOSThread() // invalidates mp
}

// Call from C back to Go.
//go:nosplit
func cgocallbackg(ctxt uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		println("runtime: bad g in cgocallback")
		exit(2)
	}

	// Save the current syscall parameters, so that m.syscall can be
	// used again if the callback decides to make a system call.
	syscall := gp.m.syscall

	// entersyscall saves the caller's SP to allow the GC to trace the Go
	// stack. However, since we're returning to an earlier stack frame and
	// need to pair with the entersyscall() call made by cgocall, we must
	// save syscall* and let reentersyscall restore them.
	savedsp := unsafe.Pointer(gp.syscallsp)
	savedpc := gp.syscallpc
	exitsyscall(0) // coming out of cgo call

	cgocallbackg1(ctxt)

	// going back to cgo call
	reentersyscall(savedpc, uintptr(savedsp))

	gp.m.syscall = syscall
}

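// As an illustrative example of the C-to-Go direction described at the
// top of this file (not code generated verbatim by cgo), an exported Go
// function such as
//
//	//export GoF
//	func GoF() { println("called from C") }
//
// can be called from gcc-compiled C code as GoF(); the call flows
// through crosscall2, _cgoexp_GoF, and runtime.cgocallback before
// reaching cgocallbackg above.
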
func cgocallbackg1(ctxt uintptr) {
	gp := getg()
	if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
		gp.m.needextram = false
		systemstack(newextram)
	}

	if ctxt != 0 {
		s := append(gp.cgoCtxt, ctxt)

		// Now we need to set gp.cgoCtxt = s, but we could get
		// a SIGPROF signal while manipulating the slice, and
		// the SIGPROF handler could pick up gp.cgoCtxt while
		// tracing up the stack.  We need to ensure that the
		// handler always sees a valid slice, so set the
		// values in an order such that it always does.
		p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
		atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
		p.cap = cap(s)
		p.len = len(s)

		defer func(gp *g) {
			// Decrease the length of the slice by one, safely.
			p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
			p.len--
		}(gp)
	}

	if gp.m.ncgo == 0 {
		// The C call to Go came from a thread not currently running
		// any Go. In the case of -buildmode=c-archive or c-shared,
		// this call may be coming in before package initialization
		// is complete. Wait until it is.
		<-main_init_done
	}

	// Add entry to defer stack in case of panic.
	restore := true
	defer unwindm(&restore)

	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	type args struct {
		fn      *funcval
		arg     unsafe.Pointer
		argsize uintptr
	}
	var cb *args

	// Location of callback arguments depends on stack frame layout
	// and size of stack frame of cgocallback_gofunc.
	sp := gp.m.g0.sched.sp
	switch GOARCH {
	default:
		throw("cgocallbackg is unimplemented on arch")
	case "arm":
		// On arm, stack frame is two words and there's a saved LR between
		// SP and the stack frame and between the stack frame and the arguments.
		cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
	case "arm64":
		// On arm64, stack frame is four words and there's a saved LR between
		// SP and the stack frame and between the stack frame and the arguments.
		cb = (*args)(unsafe.Pointer(sp + 5*sys.PtrSize))
	case "amd64":
		// On amd64, stack frame is two words, plus caller PC.
		if framepointer_enabled {
			// In this case, there's also saved BP.
			cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
			break
		}
		cb = (*args)(unsafe.Pointer(sp + 3*sys.PtrSize))
	case "386":
		// On 386, stack frame is three words, plus caller PC.
		cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
	case "ppc64", "ppc64le", "s390x":
		// On ppc64 and s390x, the callback arguments are in the arguments area of
		// cgocallback's stack frame. The stack looks like this:
		// +--------------------+------------------------------+
		// |                    | ...                          |
		// | cgoexp_$fn         +------------------------------+
		// |                    | fixed frame area             |
		// +--------------------+------------------------------+
		// |                    | arguments area               |
		// | cgocallback        +------------------------------+ <- sp + 2*minFrameSize + 2*ptrSize
		// |                    | fixed frame area             |
		// +--------------------+------------------------------+ <- sp + minFrameSize + 2*ptrSize
		// |                    | local variables (2 pointers) |
		// | cgocallback_gofunc +------------------------------+ <- sp + minFrameSize
		// |                    | fixed frame area             |
		// +--------------------+------------------------------+ <- sp
		cb = (*args)(unsafe.Pointer(sp + 2*sys.MinFrameSize + 2*sys.PtrSize))
	case "mips64", "mips64le":
		// On mips64x, stack frame is two words and there's a saved LR between
		// SP and the stack frame and between the stack frame and the arguments.
		cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
	case "mips", "mipsle":
		// On mipsx, stack frame is two words and there's a saved LR between
		// SP and the stack frame and between the stack frame and the arguments.
		cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
	}

	// Invoke callback.
	// NOTE(rsc): passing nil for argtype means that the copying of the
	// results back into cb.arg happens without any corresponding write barriers.
	// For cgo, cb.arg points into a C stack frame and therefore doesn't
	// hold any pointers that the GC can find anyway - the write barrier
	// would be a no-op.
	reflectcall(nil, unsafe.Pointer(cb.fn), cb.arg, uint32(cb.argsize), 0)

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}
	if msanenabled {
		// Tell msan that we wrote to the entire argument block.
		// This tells msan that we set the results.
		// Since we have already called the function it doesn't
		// matter that we are writing to the non-result parameters.
		msanwrite(cb.arg, cb.argsize)
	}

	// Do not unwind m->g0->sched.sp.
	// Our caller, cgocallback, will do that.
	restore = false
}

func unwindm(restore *bool) {
	if !*restore {
		return
	}
	// Restore sp saved by cgocallback during
	// unwind of g's stack (see comment at top of file).
	mp := acquirem()
	sched := &mp.g0.sched
	switch GOARCH {
	default:
		throw("unwindm not implemented")
	case "386", "amd64", "arm", "ppc64", "ppc64le", "mips64", "mips64le", "s390x", "mips", "mipsle":
		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
	case "arm64":
		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
	}

	// Call endcgo to do the accounting that cgocall will not have a
	// chance to do during an unwind.
	//
	// In the case where a Go call originates from C, ncgo is 0
	// and there is no matching cgocall to end.
	if mp.ncgo > 0 {
		endcgo(mp)
	}

	releasem(mp)
}

// called from assembly
func badcgocallback() {
	throw("misaligned stack in cgocallback")
}

// called from (incomplete) assembly
func cgounimpl() {
	throw("cgo not implemented")
}

var racecgosync uint64 // represents possible synchronization in C code

// Pointer checking for cgo code.

// We want to detect all cases where a program that does not use
// unsafe makes a cgo call passing a Go pointer to memory that
// contains a Go pointer. Here a Go pointer is defined as a pointer
// to memory allocated by the Go runtime. Programs that use unsafe
// can evade this restriction easily, so we don't try to catch them.
// The cgo program will rewrite all possibly bad pointer arguments to
// call cgoCheckPointer, where we can catch cases of a Go pointer
// pointing to a Go pointer.

// Complicating matters, taking the address of a slice or array
// element permits the C program to access all elements of the slice
// or array. In that case we will see a pointer to a single element,
// but we need to check the entire data structure.

// The cgoCheckPointer call takes additional arguments indicating that
// it was called on an address expression. An additional argument of
// true means that it only needs to check a single element. An
// additional argument of a slice or array means that it needs to
// check the entire slice/array, but nothing else. Otherwise, the
// pointer could be anything, and we check the entire heap object,
// which is conservative but safe.

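// As an illustrative example (C.f and C.g are hypothetical C functions
// taking a void*), with cgocheck enabled a call such as
//
//	p := new(int)
//	C.f(unsafe.Pointer(&p))
//
// panics with "cgo argument has Go pointer to Go pointer", because &p
// points to Go memory that itself holds a Go pointer, while
//
//	var x int32
//	C.g(unsafe.Pointer(&x))
//
// is allowed, since the memory &x points to contains no pointers.
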
// When and if we implement a moving garbage collector,
// cgoCheckPointer will pin the pointer for the duration of the cgo
// call.  (This is necessary but not sufficient; the cgo program will
// also have to change to pin Go pointers that cannot point to Go
// pointers.)

// cgoCheckPointer checks if the argument contains a Go pointer that
// points to a Go pointer, and panics if it does.
func cgoCheckPointer(ptr interface{}, args ...interface{}) {
	if debug.cgocheck == 0 {
		return
	}

	ep := (*eface)(unsafe.Pointer(&ptr))
	t := ep._type

	top := true
	if len(args) > 0 && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
		p := ep.data
		if t.kind&kindDirectIface == 0 {
			p = *(*unsafe.Pointer)(p)
		}
		if !cgoIsGoPointer(p) {
			return
		}
		aep := (*eface)(unsafe.Pointer(&args[0]))
		switch aep._type.kind & kindMask {
		case kindBool:
			if t.kind&kindMask == kindUnsafePointer {
				// We don't know the type of the element.
				break
			}
			pt := (*ptrtype)(unsafe.Pointer(t))
			cgoCheckArg(pt.elem, p, true, false, cgoCheckPointerFail)
			return
		case kindSlice:
			// Check the slice rather than the pointer.
			ep = aep
			t = ep._type
		case kindArray:
			// Check the array rather than the pointer.
			// Pass top as false since we have a pointer
			// to the array.
			ep = aep
			t = ep._type
			top = false
		default:
			throw("can't happen")
		}
	}

	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
}

const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
const cgoResultFail = "cgo result has Go pointer"

// cgoCheckArg is the real work of cgoCheckPointer. The argument p
// is either a pointer to the value (of type t), or the value itself,
// depending on indir. The top parameter is whether we are at the top
// level, where Go pointers are allowed.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
	if t.kind&kindNoPointers != 0 {
		// If the type has no pointers there is nothing to do.
		return
	}

	switch t.kind & kindMask {
	default:
		throw("can't happen")
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		if !indir {
			if at.len != 1 {
				throw("can't happen")
			}
			cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
			return
		}
		for i := uintptr(0); i < at.len; i++ {
			cgoCheckArg(at.elem, p, true, top, msg)
			p = add(p, at.elem.size)
		}
	case kindChan, kindMap:
		// These types contain internal pointers that will
		// always be allocated in the Go heap. It's never OK
		// to pass them to C.
		panic(errorString(msg))
	case kindFunc:
		if indir {
			p = *(*unsafe.Pointer)(p)
		}
		if !cgoIsGoPointer(p) {
			return
		}
		panic(errorString(msg))
	case kindInterface:
		it := *(**_type)(p)
		if it == nil {
			return
		}
		// A type known at compile time is OK since it's
		// constant. A type not known at compile time will be
		// in the heap and will not be OK.
		if inheap(uintptr(unsafe.Pointer(it))) {
			panic(errorString(msg))
		}
		p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
		if !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
		cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
	case kindSlice:
		st := (*slicetype)(unsafe.Pointer(t))
		s := (*slice)(p)
		p = s.array
		if !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
		if st.elem.kind&kindNoPointers != 0 {
			return
		}
		for i := 0; i < s.cap; i++ {
			cgoCheckArg(st.elem, p, true, false, msg)
			p = add(p, st.elem.size)
		}
	case kindString:
		ss := (*stringStruct)(p)
		if !cgoIsGoPointer(ss.str) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		if !indir {
			if len(st.fields) != 1 {
				throw("can't happen")
			}
			cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
			return
		}
		for _, f := range st.fields {
			cgoCheckArg(f.typ, add(p, f.offset), true, top, msg)
		}
	case kindPtr, kindUnsafePointer:
		if indir {
			p = *(*unsafe.Pointer)(p)
		}

		if !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}

		cgoCheckUnknownPointer(p, msg)
	}
}

// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
// memory. It checks whether that Go memory contains any other
// pointer into Go memory. If it does, we panic.
// The return values are unused but useful to see in panic tracebacks.
func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
	if cgoInRange(p, mheap_.arena_start, mheap_.arena_used) {
		if !inheap(uintptr(p)) {
			// On 32-bit systems it is possible for C's allocated memory
			// to have addresses between arena_start and arena_used.
			// Either this pointer is a stack or an unused span or it's
			// a C allocation. Escape analysis should prevent the first,
			// garbage collection should prevent the second,
			// and the third is completely OK.
			return
		}

		b, hbits, span, _ := heapBitsForObject(uintptr(p), 0, 0)
		base = b
		if base == 0 {
			return
		}
		n := span.elemsize
		for i = uintptr(0); i < n; i += sys.PtrSize {
			if i != 1*sys.PtrSize && !hbits.morePointers() {
				// No more possible pointers.
				break
			}
			if hbits.isPointer() {
				if cgoIsGoPointer(*(*unsafe.Pointer)(unsafe.Pointer(base + i))) {
					panic(errorString(msg))
				}
			}
			hbits = hbits.next()
		}

		return
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			// We have no way to know the size of the object.
			// We have to assume that it might contain a pointer.
			panic(errorString(msg))
		}
		// In the text or noptr sections, we know that the
		// pointer does not point to a Go pointer.
	}

	return
}

// cgoIsGoPointer returns whether the pointer is a Go pointer--a
// pointer to Go memory. We only care about Go memory that might
// contain pointers.
//go:nosplit
//go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool {
	if p == nil {
		return false
	}

	if inHeapOrStack(uintptr(p)) {
		return true
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			return true
		}
	}

	return false
}

// cgoInRange returns whether p is between start and end.
//go:nosplit
//go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
	return start <= uintptr(p) && uintptr(p) < end
}

// cgoCheckResult is called to check the result parameter of an
// exported Go function. It panics if the result is or contains a Go
// pointer.
func cgoCheckResult(val interface{}) {
	if debug.cgocheck == 0 {
		return
	}

	ep := (*eface)(unsafe.Pointer(&val))
	t := ep._type
	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)
}
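
// As an illustrative example (hypothetical exported function), with
// cgocheck enabled an exported function such as
//
//	//export NewInt
//	func NewInt() *int { return new(int) }
//
// causes the result check above to panic with "cgo result has Go
// pointer" before the value is handed back to C, because the returned
// pointer points into the Go heap.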
    645