// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

/*
 * defined constants
 */
const (
	// G status
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	_Gidle            = iota // 0
	_Grunnable               // 1 runnable and on a run queue
	_Grunning                // 2
	_Gsyscall                // 3
	_Gwaiting                // 4
	_Gmoribund_unused        // 5 currently unused, but hardcoded in gdb scripts
	_Gdead                   // 6
	_Genqueue                // 7 only used as _Gscanenqueue
	_Gcopystack              // 8 in this state when newstack is moving the stack
	// The following encode that the GC is scanning the stack and what to do when it is done.
	_Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state
	// _Gscanidle = _Gscan + _Gidle // not used; _Gidle is only used for newly malloced gs
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001 when scanning completes, make it Grunnable (it is already on a run queue)
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002 used to tell the preemption newstack routine to scan the preempted stack
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003 when scanning completes, make it Gsyscall
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004 when scanning completes, make it Gwaiting
	// _Gscanmoribund_unused // not possible
	// _Gscandead            // not possible
	_Gscanenqueue = _Gscan + _Genqueue // when scanning completes, make it Grunnable and put it on the run queue
)
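// The two helpers below are an illustrative sketch, not part of the
// original file: they show how the _Gscan bit composes with the base
// states above. The names (sketchIsScan, sketchBaseStatus) are
// hypothetical.

// sketchIsScan reports whether the GC is scanning the stack of a G in
// the given status.
func sketchIsScan(status uint32) bool {
	return status&_Gscan != 0
}

// sketchBaseStatus strips the _Gscan bit, e.g. _Gscanwaiting -> _Gwaiting.
func sketchBaseStatus(status uint32) uint32 {
	return status &^ _Gscan
}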
const (
	// P status
	_Pidle    = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)

// The next line makes 'go generate' write the zgen_*.go files with
// per-OS and per-arch information, including constants
// named goos_$GOOS and goarch_$GOARCH for every
// known GOOS and GOARCH. The constant is 1 on the
// current system, 0 otherwise; multiplying by them is
// useful for defining GOOS- or GOARCH-specific constants.
//go:generate go run gengoos.go
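// An illustrative sketch (not part of the original file) of the
// multiply-by-constant idiom the comment above describes. The constant
// name is hypothetical; goos_linux comes from the generated files:
//
//	// _exampleLinuxOnly is 64KB on Linux and 0 elsewhere, no build tag needed.
//	const _exampleLinuxOnly = (64 << 10) * goos_linux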
type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type _string struct {
	str *byte
	len int
}

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

// The guintptr, muintptr, and puintptr types are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

func (gp guintptr) ptr() *g   { return (*g)(unsafe.Pointer(gp)) }
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
func (gp *guintptr) cas(old, new guintptr) bool {
	return casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

type puintptr uintptr

func (pp puintptr) ptr() *p   { return (*p)(unsafe.Pointer(pp)) }
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

type muintptr uintptr

func (mp muintptr) ptr() *m   { return (*m)(unsafe.Pointer(mp)) }
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
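// An illustrative sketch (hypothetical helper, not part of the original
// file): pushing a G onto an intrusive list through guintptr involves no
// write barriers, because both the list head and g.schedlink are plain
// uintptrs as far as the GC is concerned.
func sketchPush(head *guintptr, gp *g) {
	gp.schedlink = *head // no write barrier: guintptr is not a pointer type
	head.set(gp)         // ditto
}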
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
	ret  uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type sudog struct {
	g           *g
	selectdone  *uint32
	next        *sudog
	prev        *sudog
	elem        unsafe.Pointer // data element
	releasetime int64
	nrelease    int32  // -1 for acquire
	waitlink    *sudog // g.waiting list
}

type gcstats struct {
	// the struct must consist of only uint64's,
	// because it is cast to []uint64.
	nhandoff    uint64
	nhandoffcnt uint64
	nprocyield  uint64
	nosyield    uint64
	nsleep      uint64
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// describes how to handle a callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

// stkbar records the state of a G's stack barrier.
type stkbar struct {
	savedLRPtr uintptr // location overwritten by stack barrier PC
	savedLRVal uintptr // value overwritten at savedLRPtr
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic         *_panic // innermost panic - offset known to liblink
	_defer         *_defer // innermost defer
	m              *m      // current m; offset known to arm liblink
	stackAlloc     uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
	sched          gobuf
	syscallsp      uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc      uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stkbar         []stkbar       // stack barriers, from low to high
	stkbarPos      uintptr        // index of lowest stack barrier not hit
	param          unsafe.Pointer // passed parameter on wakeup
	atomicstatus   uint32
	stackLock      uint32 // sigprof/scang lock; TODO: fold into atomicstatus
	goid           int64
	waitsince      int64  // approx time when the g became blocked
	waitreason     string // if status==Gwaiting
	schedlink      guintptr
	preempt        bool   // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool   // panic (instead of crash) on unexpected fault address
	preemptscan    bool   // preempted g does scan for gc
	gcscandone     bool   // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool   // false at start of gc cycle, true if G has not run since last scan
	throwsplit     bool   // must not split stack
	raceignore     int8   // ignore race detection events
	sysblocktraced bool   // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64  // cputicks when syscall has returned (for tracing)
	sysexitseq     uint64 // trace seq when syscall has returned (for tracing)
	lockedm        *m
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	racectx        uintptr
	waiting        *sudog // sudog structures this g is waiting on (that have a valid elem ptr)
	readyg         *g     // scratch for readyExecute

	// Per-G gcController state
	gcalloc    uintptr // bytes allocated during this GC cycle
	gcscanwork int64   // scan work done (or stolen) this GC cycle
}

type mts struct {
	tv_sec  int64
	tv_nsec int64
}

type mscratch struct {
	v [6]uintptr
}

type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid        uint64     // for debuggers, but offset not hard-coded
	gsignal       *g         // signal-handling g
	sigmask       [4]uintptr // storage for saved signal mask
	tls           [4]uintptr // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	id            int32
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	softfloat     int32
	dying         int32
	profilehz     int32
	helpgc        int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	inwb          bool // m is executing a write barrier
	printlock     int8
	fastrand      uint32
	ncgocall      uint64 // number of cgo calls in total
	ncgo          int32  // number of cgo calls currently in progress
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	machport      uint32 // return address for mach ipc (os x)
	mcache        *mcache
	lockedg       *g
	createstack   [32]uintptr // stack that created this thread
	freglo        [16]uint32  // d[i] lsb and f[i]
	freghi        [16]uint32  // d[i] msb and f[i+16]
	fflag         uint32      // floating point compare flags
	locked        uint32      // tracking for lockosthread
	nextwaitm     uintptr     // next m waiting for lock
	waitsema      uintptr     // semaphore for parking on locks
	waitsemacount uint32
	waitsemalock  uint32
	gcstats       gcstats
	needextram    bool
	traceback     uint8
	waitunlockf   unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	//#ifdef GOOS_windows
	thread uintptr // thread handle
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	syscall   libcall // stores syscall parameters on windows
	//#endif
	//#ifdef GOOS_solaris
	perrno *int32 // pointer to tls errno
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	//LibCall libcall;
	ts      mts
	scratch mscratch
	//#endif
	//#ifdef GOOS_plan9
	notesig *int8
	errstr  *byte
	//#endif
}

type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32   // incremented on every scheduler call
	syscalltick uint32   // incremented on every system call
	m           muintptr // back-link to associated m (nil if idle)
	mcache      *mcache

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]*g
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf *traceBuf

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime     int64 // Nanoseconds in assistAlloc
	gcBgMarkWorker   *g
	gcMarkWorkerMode gcMarkWorkerMode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad [64]byte
}

const (
	// The max value of GOMAXPROCS.
	// There are no fundamental restrictions on the value.
	_MaxGomaxprocs = 1 << 8
)

type schedt struct {
	lock mutex

	goidgen uint64

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mcount       int32    // number of m's that have been created
	maxmcount    int32    // maximum number of m's allowed (or die)

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note
	lastpoll   uint64

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
	_LockExternal = 1
	_LockInternal = 2
)
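// An illustrative sketch (hypothetical helpers, not in the original file)
// of how m.locked encodes the two counts described above:
func sketchLockedExternally(mp *m) bool {
	return mp.locked&_LockExternal != 0
}

func sketchInternalDepth(mp *m) uint32 {
	return (mp.locked &^ _LockExternal) / _LockInternal
}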
type sigtabtt struct {
	flags int32
	name  *int8
}

const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigHandling             // our signal handler is registered
	_SigIgnored              // the signal was ignored before we registered for it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9)
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // unblocked in minit
)

// Layout of in-memory per-function information prepared by linker.
// See https://golang.org/s/go12symtab.
// Keep in sync with linker
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args  int32 // in/out args size
	frame int32 // legacy frame size; use pcsp if possible

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	nfuncdata int32
}

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
type itab struct {
	inter  *interfacetype
	_type  *_type
	link   *itab
	bad    int32
	unused int32
	fun    [1]uintptr // variable sized
}

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

/*
 * known to compiler
 */
const (
	_Structrnd = regSize
)

// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < ptrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}
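// A hedged usage sketch (the helper name sketchFillRandom is hypothetical):
// seed a buffer from whatever startup entropy is available, then let
// extendRandom stretch it to cover the rest.
func sketchFillRandom(buf []byte) {
	n := copy(buf, startupRandomData) // n may be 0 if no AT_RANDOM was provided
	extendRandom(buf, n)              // extends buf[:n] to fill all of buf
}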
/*
 * deferred subroutine calls
 */
type _defer struct {
	siz     int32
	started bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *funcval
	_panic  *_panic // panic that is running defer
	link    *_defer
}

/*
 * panics
 */
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
}

/*
 * stack traces
 */

type stkframe struct {
	fn       *_func     // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

const (
	// The maximum number of frames we print for a traceback
	_TracebackMaxFrames = 100
)

var (
	emptystring string
	allg        **g
	allglen     uintptr
	lastg       *g
	allm        *m
	allp        [_MaxGomaxprocs + 1]*p
	gomaxprocs  int32
	panicking   uint32
	goos        *int8
	ncpu        int32
	signote     note
	forcegc     forcegcstate
	sched       schedt
	newprocs    int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	cpuid_ecx         uint32
	cpuid_edx         uint32
	lfenceBeforeRdtsc bool

	goarm uint8 // set by cmd/link on arm systems
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

/*
 * mutual exclusion locks. in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed Mutex is unlocked (no need to initialize each lock).
 */
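// A hedged sketch of the locking discipline just described (sketchWithLock
// is a hypothetical helper; lock and unlock are the runtime's mutex
// operations, defined per-OS in the futex- and sema-based lock files):
func sketchWithLock(l *mutex, f func()) {
	lock(l) // spins briefly, then sleeps in the kernel on contention
	f()
	unlock(l)
}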
/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the Note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return. future notesleep will return immediately.
 * subsequent noteclear must be called only after
 * previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened. if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 *
 * notesleep/notetsleep are generally called on g0,
 * notetsleepg is similar to notetsleep but is called on user g.
 */
// bool runtime·notetsleep(Note*, int64);  // false - timeout
// bool runtime·notetsleepg(Note*, int64);  // false - timeout
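// A hedged sketch of the one-shot protocol above (purely illustrative;
// per the comment, notesleep should run on g0, so this is shown as
// commented pseudocode rather than a callable function):
//
//	noteclear(&n)  // initialize; must precede any sleep/wakeup
//	// sleeper thread:
//	notesleep(&n)  // blocks until the event has happened
//	// waker thread:
//	notewakeup(&n) // may be called exactly once per noteclear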
676 * 677 * notetsleep is like notesleep but wakes up after 678 * a given number of nanoseconds even if the event 679 * has not yet happened. if a goroutine uses notetsleep to 680 * wake up early, it must wait to call noteclear until it 681 * can be sure that no other goroutine is calling 682 * notewakeup. 683 * 684 * notesleep/notetsleep are generally called on g0, 685 * notetsleepg is similar to notetsleep but is called on user g. 686 */ 687 // bool runtimenotetsleep(Note*, int64); // false - timeout 688 // bool runtimenotetsleepg(Note*, int64); // false - timeout 689 690 /* 691 * Lock-free stack. 692 * Initialize uint64 head to 0, compare with 0 to test for emptiness. 693 * The stack does not keep pointers to nodes, 694 * so they can be garbage collected if there are no other pointers to nodes. 695 */ 696 697 // for mmap, we only pass the lower 32 bits of file offset to the 698 // assembly routine; the higher bits (if required), should be provided 699 // by the assembly routine as 0. 700