Source file src/runtime/panic.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"internal/stringslite"
	"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
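
// For illustration, a minimal sketch of the two cases (hand-written
// source, not actual compiler output; f, g, and unlock are hypothetical):
//
//	func f() {
//		defer unlock() // not in a loop: open-coded in a stack slot plus a bitmask bit
//		// ...
//	}
//
//	func g(n int) {
//		for i := 0; i < n; i++ {
//			defer println(i) // in a loop: allocates a defer record on the chain
//		}
//	}
//
// f's defers can be run by inline code at each function exit, while g's
// must be pushed onto the defer chain and run by deferreturn.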

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? internal/runtime/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shifts by a
// negative amount. The panicdivide (again), panicoverflow, panicfloat,
// and panicmem functions are called by the signal handler when a signal
// occurs indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.

// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}

func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}

func goPanicSlice3Acap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}

func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}

func goPanicSlice3BU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}

func goPanicSlice3CU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}
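
// For illustration, a sketch of which entry points compiler-generated
// bounds checks reach (each line viewed independently, and modulo
// inlining and bounds-check elimination):
//
//	s := make([]int, 3) // len(s) == 3, cap(s) == 3
//	_ = s[5]            // panicIndex(5, 3): want 0 <= 5 < len(s)
//	_ = s[:5]           // panicSliceAcap(5, 3): want 5 <= cap(s)
//	str := "abc"
//	_ = str[:5]         // panicSliceAlen(5, 3): want 5 <= len(str)
//
// The unsigned (...U) variants are used when the index expression has
// an unsigned type.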

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(sys.GetCallerPC(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

// Create a new deferred function fn, which has no arguments and no results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()
}
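
// For illustration, a rough sketch of the lowering performed for a
// function whose defers cannot be open-coded (not literal compiler
// output; f and x are hypothetical):
//
//	defer f(x)
//
// becomes approximately
//
//	x1 := x                     // argument evaluated at the defer statement
//	deferproc(func() { f(x1) }) // push a defer record onto g._defer
//
// and every function exit runs the recorded calls via deferreturn.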

var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))

//go:noinline
func panicrangestate(state int) {
	switch abi.RF_State(state) {
	case abi.RF_DONE:
		panic(rangeDoneError)
	case abi.RF_PANIC:
		panic(rangePanicError)
	case abi.RF_EXHAUSTED:
		panic(rangeExhaustedError)
	case abi.RF_MISSING_PANIC:
		panic(rangeMissingPanicError)
	}
	throw("unexpected state passed to panicrangestate")
}

// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but with an atomic list hanging off:
//
//	g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                        | .head
//	                        |
//	                        +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
//	g._defer => drangefunc -> d2 -> d1 -> nil
//	            | .head
//	            |
//	            +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	return d.head
}
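
// For illustration, a sketch of the arrangement for a range-over-func
// loop (hand-written source; seq and f are hypothetical):
//
//	func f(seq func(yield func(int) bool)) {
//		for x := range seq {
//			defer println(x) // must run when f returns, not when the loop body returns
//		}
//	}
//
// The compiler rewrites the loop body into a func literal, has f call
// deferrangefunc once to obtain the token for f's frame, and lowers the
// defer in the body to deferprocat(func() { println(x) }, token).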

// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}
}

// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false

	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
	return
}

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = sys.GetCallerSP()
	d.pc = sys.GetCallerPC()
	// The lines below implement:
	//	d.panic = nil
	//	d.fd = nil
	//	d.link = gp._defer
	//	d.head = nil
	//	gp._defer = d
	// But without write barriers. The first three are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The fourth write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
}

// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}

// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
	d := gp._defer
	d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between updating gp._defer and setting
	// d.link to nil. Between these two steps, d is not on any defer list, so
	// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
	// if we were to copy the stack, d could then contain a stale pointer.
	gp._defer = d.link
	d.link = nil
	// After this point we can copy the stack.

	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of the local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}

// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	var p _panic
	p.deferreturn = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
//
// It crashes if called from a thread not created by the Go runtime.
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}
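
// For illustration, a minimal sketch of Goexit's interaction with defers:
//
//	go func() {
//		defer func() {
//			println("deferred call still runs")
//			println(recover() == nil) // true: Goexit is not a panic
//		}()
//		runtime.Goexit()
//		println("not reached")
//	}()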

// Call all Error and String methods before freezing the world.
// Used when crashing due to a panic.
func preprintpanics(p *_panic) {
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		if p.link != nil && *efaceOf(&p.link.arg) == *efaceOf(&p.arg) {
			// This panic contains the same value as the next one in the chain.
			// Mark it as reraised. We will skip printing it twice in a row.
			p.link.reraised = true
			p = p.link
			continue
		}
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if p.link.reraised {
			return
		}
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printpanicval(p.arg)
	if p.reraised {
		print(" [recovered, reraised]")
	} else if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}

// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}
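
// For illustration, a sketch of the observable behavior on Go 1.21 and
// later with the default GODEBUG setting:
//
//	defer func() {
//		r := recover()
//		_, ok := r.(*runtime.PanicNilError)
//		println(ok) // true: the nil panic value was converted
//	}()
//	panic(nil)
//
// With GODEBUG=panicnil=1, recover returns nil instead.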

var panicnil = &godebugInc{name: "panicnil"}

// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - go.undefinedlabs.com/scopeagent
//   - github.com/goplus/igop
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printpanicval(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e

	runningPanicDefers.Add(1)

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// If we're tracing, flush the current generation to make the trace more
	// readable.
	//
	// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
	// Currently it would hang. Not handled now because it is very unlikely, and
	// already unrecoverable.
	if traceEnabled() {
		traceAdvance(false)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}

// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = sys.GetCallerPC()
	p.startSP = unsafe.Pointer(sys.GetCallerSP())

	if p.deferreturn {
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.

			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.lr, p.fp = pc, sp
	p.nextFrame()
}

// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	// The assembler adjusts p.argp in wrapper functions that shouldn't
	// be visible to recover(), so we need to restore it each iteration.
	p.argp = add(p.startSP, sys.MinFrameSize)

	for {
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panicked and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				deferconvert(d)
				popDefer(gp)
				goto Recheck
			}

			fn := d.fn

			p.retpc = d.pc

			// Unlink and free.
			popDefer(gp)

			return fn, true
		}

		if !p.nextFrame() {
			return nil, false
		}
	}
}
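
// For illustration, a worked example of the deferBits scan in nextDefer
// above: if *p.deferBitsPtr is 0b00000101, defers were recorded in slots
// 0 and 2. LeadingZeros8(0b00000101) is 5, so i = 7-5 = 2 and the slot-2
// defer runs first (defers run in reverse order); the stored bits become
// 0b00000001, and the next call returns the slot-0 defer.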

// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.lr == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
		for {
			if !u.valid() {
				p.lr = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}

			u.next()
		}

		p.lr = u.frame.lr
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
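
// For illustration, a sketch of the case the argp comparison accepts
// (hand-written user code; f is hypothetical):
//
//	func f() (err error) {
//		defer func() {
//			if r := recover(); r != nil { // direct call from the deferred func: argp matches
//				err = fmt.Errorf("recovered: %v", r)
//			}
//		}()
//		panic("boom")
//	}
//
// A recover called from a helper nested inside the deferred function
// (or outside any deferred call) sees a different argp and returns nil.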

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

//go:linkname rand_fatal crypto/rand.fatal
func rand_fatal(s string) {
	fatal(s)
}

//go:linkname sysrand_fatal crypto/internal/sysrand.fatal
func sysrand_fatal(s string) {
	fatal(s)
}

//go:linkname fips_fatal crypto/internal/fips140.fatal
func fips_fatal(s string) {
	fatal(s)
}

//go:linkname maps_fatal internal/runtime/maps.fatal
func maps_fatal(s string) {
	fatal(s)
}

//go:linkname internal_sync_throw internal/sync.throw
func internal_sync_throw(s string) {
	throw(s)
}

//go:linkname internal_sync_fatal internal/sync.fatal
func internal_sync_fatal(s string) {
	fatal(s)
}

// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// throw should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cockroachdb/pebble
//   - github.com/dgraph-io/ristretto
//   - github.com/outcaste-io/ristretto
//   - github.com/pingcap/br
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname throw
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeRuntime)
}

// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	printlock() // Prevent multiple interleaved fatal reports. See issue 69447.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeUser)
	printunlock()
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// The linker records the f-relative address of a call to deferreturn in f's funcInfo.
	// Assuming a "normal" call to recover() inside one of f's deferred functions
	// invoked for a panic, that is the desired PC for exiting f.
	f := findfunc(pc)
	if f.deferreturn == 0 {
		throw("no deferreturn")
	}
	gotoPc := f.entry() + uintptr(f.deferreturn)

	// Unwind the panic stack.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			gotoPc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// branch directly to the deferreturn
	gp.sched.sp = sp
	gp.sched.pc = gotoPc
	gp.sched.lr = 0
	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// on x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// on arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have a P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == abi.FuncID_abort
}