// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

var indexError = error(errorString("index out of range"))

func panicindex() {
	panic(indexError)
}

var sliceError = error(errorString("slice bounds out of range"))

func panicslice() {
	panic(sliceError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panic(memoryError)
}

func throwreturn() {
	throw("no return at end of a typed function - compiler is broken")
}

func throwinit() {
	throw("recursive call during initialization - linker skew")
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// Go code on the system stack can't defer.
		throw("defer on system stack")
	}

	// The arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	systemstack(func() {
		d := newdefer(siz)
		if d._panic != nil {
			throw("deferproc: d._panic != nil after newdefer")
		}
		d.fn = fn
		d.pc = callerpc
		d.sp = sp
		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
	})

	// deferproc returns 0 normally.
	// A deferred func that stops a panic
	// makes the deferproc return 1.
	// The code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
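
// For illustration only (a schematic of the compiler's lowering, not code
// in this package): a statement such as
//
//	defer f(x)
//
// is turned into roughly
//
//	if deferproc(siz, f) != 0 {
//		goto ret // a deferred call recovered a panic; skip to the exit
//	}
//	...
//	ret:
//		deferreturn()
//
// with f's argument values copied to the stack immediately after
// deferproc's own arguments. The exact sequence is architecture- and
// compiler-version-specific; this sketch only shows the control flow
// implied by the return0 and deferreturn comments in this file.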

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}

var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}

// Allocate a _defer, usually using the per-P pool.
// Each defer must be released with freedefer.
// Note: runs on g0 stack.
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Local pool is empty: refill it to half capacity
			// from the central pool.
			lock(&sched.deferlock)
			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
				d := sched.deferpool[sc]
				sched.deferpool[sc] = d.link
				d.link = nil
				pp.deferpool[sc] = append(pp.deferpool[sc], d)
			}
			unlock(&sched.deferlock)
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := roundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, 0))
	}
	d.siz = siz
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
			// Transfer half of the local cache to the central cache.
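			// The loop below pops entries off the tail of the local
			// pool and strings them into a list through their link
			// fields (first..last). The list is built without holding
			// any lock; deferlock is held only for the final splice
			// onto the head of sched.deferpool.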
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		}
		*d = _defer{}
		pp.deferpool[sc] = append(pp.deferpool[sc], d)
		releasem(mp)
	}
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		// The topmost defer was not registered by the caller's
		// frame, so there is nothing for this deferreturn to run.
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	// Switch to the system stack merely to save nosplit stack space.
	systemstack(func() {
		freedefer(d)
	})
	releasem(mp)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, however, any recover calls in those deferred functions will return nil.
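//
// For illustration only (a sketch of user code in a normal program that
// imports fmt and runtime; not code in this package):
//
//	go func() {
//		defer fmt.Println("deferred calls still run")
//		defer func() {
//			fmt.Println(recover()) // prints <nil>: Goexit is not a panic
//		}()
//		runtime.Goexit()
//	}()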
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic; see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic.
	}
	goexit1()
}

// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If the defer was started by an earlier panic or Goexit (and,
		// since we're back here, that triggered a new panic), take the
		// defer off the list. The earlier panic or Goexit will not
		// continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark the defer as started, but keep it on the list, so that
		// traceback can find and update the defer's argument frame if
		// stack growth or a garbage collection happens before reflectcall
		// starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// Trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic.
		//GC()
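
		// d.pc and d.sp are read out before freedefer clears d. If the
		// deferred call recovered the panic, recovery (reached via mcall
		// below) uses them to make the deferproc call that registered d
		// appear to return 1, as described in the comment at the end of
		// deferproc.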
		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about the recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// Ran out of deferred calls - old-school panic now.
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}

//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0
}

//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}
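
// For illustration only (a user-level sketch of a panicking function, not
// code in this package): the argp comparison in gorecover is why recover
// stops a panic only when called directly by the deferred function. From a
// nested call, the argp reported by the caller no longer matches p.argp,
// and recover returns nil:
//
//	defer func() {
//		recover() // stops the panic: the deferred function calls it directly
//	}()
//
//	defer func() {
//		func() {
//			recover() // returns nil: this is not the topmost deferred call
//		}()
//	}()
//
//	panic("boom")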