// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris

package runtime

import (
	"internal/abi"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// sigTabT is the type of an entry in the global sigtable array.
// sigtable is inherently system dependent, and appears in OS-specific files,
// but sigTabT is the same for all Unixy systems.
// The sigtable array is indexed by a system signal number to get the flags
// and printable name of each signal.
type sigTabT struct {
	flags int32
	name  string
}

//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
	systemstack(sigpipe)
}

// signame returns the printable name of signal sig from sigtable,
// or "" if sig is out of range.
func signame(sig uint32) string {
	if sig >= uint32(len(sigtable)) {
		return ""
	}
	return sigtable[sig].name
}

const (
	_SIG_DFL uintptr = 0
	_SIG_IGN uintptr = 1
)

// sigPreempt is the signal used for non-cooperative preemption.
//
// There's no good way to choose this signal, but there are some
// heuristics:
//
// 1. It should be a signal that's passed-through by debuggers by
// default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO,
// SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
//
// 2. It shouldn't be used internally by libc in mixed Go/C binaries
// because libc may assume it's the only thing that can handle these
// signals. For example SIGCANCEL or SIGSETXID.
//
// 3. It should be a signal that can happen spuriously without
// consequences. For example, SIGALRM is a bad choice because the
// signal handler can't tell if it was caused by the real process
// alarm or not (arguably this means the signal is broken, but I
// digress). SIGUSR1 and SIGUSR2 are also bad because those are often
// used in meaningful ways by applications.
//
// 4. We need to deal with platforms without real-time signals (like
// macOS), so those are out.
//
// We use SIGURG because it meets all of these criteria, is extremely
// unlikely to be used by an application for its "real" meaning (both
// because out-of-band data is basically unused and because SIGURG
// doesn't report which socket has the condition, making it pretty
// useless), and even if it is, the application has to be ready for
// spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more
// likely to be used for real.
const sigPreempt = _SIGURG

// Stores the signal handlers registered before Go installed its own.
// These signal handlers will be invoked in cases where Go doesn't want to
// handle a particular signal (e.g., signal occurred on a non-Go thread).
// See sigfwdgo for more information on when the signals are forwarded.
//
// This is read by the signal handler; accesses should use
// atomic.Loaduintptr and atomic.Storeuintptr.
var fwdSig [_NSIG]uintptr

// handlingSig is indexed by signal number and is non-zero if we are
// currently handling the signal. Or, to put it another way, whether
// the signal handler is currently set to the Go signal handler or not.
// This is uint32 rather than bool so that we can use atomic instructions.
var handlingSig [_NSIG]uint32

// channels for synchronizing signal mask updates with the signal mask
// thread
var (
	disableSigChan  chan uint32
	enableSigChan   chan uint32
	maskUpdatedChan chan struct{}
)

func init() {
	// _NSIG is the number of signals on this operating system.
	// sigtable should describe what to do for all the possible signals.
	if len(sigtable) != _NSIG {
		print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
		throw("bad sigtable len")
	}
}

var signalsOK bool

// Initialize signals.
// Called by libpreinit so runtime may not be initialized.
//go:nosplit
//go:nowritebarrierrec
func initsig(preinit bool) {
	if !preinit {
		// It's now OK for signal handlers to run.
		signalsOK = true
	}

	// For c-archive/c-shared this is called by libpreinit with
	// preinit == true.
	if (isarchive || islibrary) && !preinit {
		return
	}

	for i := uint32(0); i < _NSIG; i++ {
		t := &sigtable[i]
		if t.flags == 0 || t.flags&_SigDefault != 0 {
			continue
		}

		// We don't need to use atomic operations here because
		// there shouldn't be any other goroutines running yet.
		fwdSig[i] = getsig(i)

		if !sigInstallGoHandler(i) {
			// Even if we are not installing a signal handler,
			// set SA_ONSTACK if necessary.
			if fwdSig[i] != _SIG_DFL && fwdSig[i] != _SIG_IGN {
				setsigstack(i)
			} else if fwdSig[i] == _SIG_IGN {
				sigInitIgnored(i)
			}
			continue
		}

		handlingSig[i] = 1
		setsig(i, abi.FuncPCABIInternal(sighandler))
	}
}

//go:nosplit
//go:nowritebarrierrec
func sigInstallGoHandler(sig uint32) bool {
	// For some signals, we respect an inherited SIG_IGN handler
	// rather than insist on installing our own default handler.
	// Even these signals can be fetched using the os/signal package.
	switch sig {
	case _SIGHUP, _SIGINT:
		if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
			return false
		}
	}

	t := &sigtable[sig]
	if t.flags&_SigSetStack != 0 {
		return false
	}

	// When built using c-archive or c-shared, only install signal
	// handlers for synchronous signals and SIGPIPE and sigPreempt.
	if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE && sig != sigPreempt {
		return false
	}

	return true
}

// sigenable enables the Go signal handler to catch the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.enableSignal and signal_enable.
func sigenable(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		ensureSigM()
		enableSigChan <- sig
		<-maskUpdatedChan
		if atomic.Cas(&handlingSig[sig], 0, 1) {
			atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
			setsig(sig, abi.FuncPCABIInternal(sighandler))
		}
	}
}

// sigdisable disables the Go signal handler for the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.disableSignal and signal_disable.
func sigdisable(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		ensureSigM()
		disableSigChan <- sig
		<-maskUpdatedChan

		// If initsig does not install a signal handler for a
		// signal, then to go back to the state before Notify
		// we should remove the one we installed.
		if !sigInstallGoHandler(sig) {
			atomic.Store(&handlingSig[sig], 0)
			setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
		}
	}
}

// sigignore ignores the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.ignoreSignal and signal_ignore.
func sigignore(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		atomic.Store(&handlingSig[sig], 0)
		setsig(sig, _SIG_IGN)
	}
}

// clearSignalHandlers clears all signal handlers that are not ignored
// back to the default. This is called by the child after a fork, so that
// we can enable the signal mask for the exec without worrying about
// running a signal handler in the child.
//go:nosplit
//go:nowritebarrierrec
func clearSignalHandlers() {
	for i := uint32(0); i < _NSIG; i++ {
		if atomic.Load(&handlingSig[i]) != 0 {
			setsig(i, _SIG_DFL)
		}
	}
}

// setProcessCPUProfilerTimer is called when the profiling timer changes.
// It is called with prof.signalLock held. hz is the new timer, and is 0 if
// profiling is being disabled. Enable or disable the signal as
// required for -buildmode=c-archive.
func setProcessCPUProfilerTimer(hz int32) {
	if hz != 0 {
		// Enable the Go signal handler if not enabled.
		if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
			atomic.Storeuintptr(&fwdSig[_SIGPROF], getsig(_SIGPROF))
			setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
		}

		var it itimerval
		it.it_interval.tv_sec = 0
		it.it_interval.set_usec(1000000 / hz)
		it.it_value = it.it_interval
		setitimer(_ITIMER_PROF, &it, nil)
	} else {
		setitimer(_ITIMER_PROF, &itimerval{}, nil)

		// If the Go signal handler should be disabled by default,
		// switch back to the signal handler that was installed
		// when we enabled profiling. We don't try to handle the case
		// of a program that changes the SIGPROF handler while Go
		// profiling is enabled.
		//
		// If no signal handler was installed before, then start
		// ignoring SIGPROF signals. We do this, rather than change
		// to SIG_DFL, because there may be a pending SIGPROF
		// signal that has not yet been delivered to some other thread.
		// If we change to SIG_DFL here, the program will crash
		// when that SIGPROF is delivered. We assume that programs
		// that use profiling don't want to crash on a stray SIGPROF.
		// See issue 19320.
		if !sigInstallGoHandler(_SIGPROF) {
			if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
				h := atomic.Loaduintptr(&fwdSig[_SIGPROF])
				if h == _SIG_DFL {
					h = _SIG_IGN
				}
				setsig(_SIGPROF, h)
			}
		}
	}
}

// setThreadCPUProfilerHz makes any thread-specific changes required to
// implement profiling at a rate of hz.
// No changes required on Unix systems when using setitimer.
func setThreadCPUProfilerHz(hz int32) {
	getg().m.profilehz = hz
}

// sigpipe handles a SIGPIPE delivered to Go code: if the signal is
// neither ignored nor wanted by os/signal (sigsend), it is fatal.
func sigpipe() {
	if signal_ignored(_SIGPIPE) || sigsend(_SIGPIPE) {
		return
	}
	dieFromSignal(_SIGPIPE)
}

// doSigPreempt handles a preemption signal on gp.
func doSigPreempt(gp *g, ctxt *sigctxt) {
	// Check if this G wants to be preempted and is safe to
	// preempt.
	if wantAsyncPreempt(gp) {
		if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
			// Adjust the PC and inject a call to asyncPreempt.
			ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
		}
	}

	// Acknowledge the preemption.
	atomic.Xadd(&gp.m.preemptGen, 1)
	atomic.Store(&gp.m.signalPending, 0)

	if GOOS == "darwin" || GOOS == "ios" {
		atomic.Xadd(&pendingPreemptSignals, -1)
	}
}

const preemptMSupported = true

// preemptM sends a preemption request to mp. This request may be
// handled asynchronously and may be coalesced with other requests to
// the M. When the request is received, if the running G or P are
// marked for preemption and the goroutine is at an asynchronous
// safe-point, it will preempt the goroutine. It always atomically
// increments mp.preemptGen after handling a preemption request.
func preemptM(mp *m) {
	// On Darwin, don't try to preempt threads during exec.
	// Issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		execLock.rlock()
	}

	if atomic.Cas(&mp.signalPending, 0, 1) {
		if GOOS == "darwin" || GOOS == "ios" {
			atomic.Xadd(&pendingPreemptSignals, 1)
		}

		// If multiple threads are preempting the same M, it may send many
		// signals to the same M such that it hardly make progress, causing
		// live-lock problem. Apparently this could happen on darwin. See
		// issue #37741.
		// Only send a signal if there isn't already one pending.
		signalM(mp, sigPreempt)
	}

	if GOOS == "darwin" || GOOS == "ios" {
		execLock.runlock()
	}
}

// sigFetchG fetches the value of G safely when running in a signal handler.
// On some architectures, the g value may be clobbered when running in a VDSO.
// See issue #32912.
//
//go:nosplit
func sigFetchG(c *sigctxt) *g {
	switch GOARCH {
	case "arm", "arm64", "ppc64", "ppc64le", "riscv64":
		if !iscgo && inVDSOPage(c.sigpc()) {
			// When using cgo, we save the g on TLS and load it from there
			// in sigtramp. Just use that.
			// Otherwise, before making a VDSO call we save the g to the
			// bottom of the signal stack. Fetch from there.
			// TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
			// work.
			sp := getcallersp()
			s := spanOf(sp)
			if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
				gp := *(**g)(unsafe.Pointer(s.base()))
				return gp
			}
			return nil
		}
	}
	return getg()
}

// sigtrampgo is called from the signal handler function, sigtramp,
// written in assembly code.
// This is called by the signal handler, and the world may be stopped.
//
// It must be nosplit because getg() is still the G that was running
// (if any) when the signal was delivered, but it's (usually) called
// on the gsignal stack. Until this switches the G to gsignal, the
// stack bounds check won't work.
//
//go:nosplit
//go:nowritebarrierrec
func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
	if sigfwdgo(sig, info, ctx) {
		return
	}
	c := &sigctxt{info, ctx}
	g := sigFetchG(c)
	setg(g)
	if g == nil {
		// Signal arrived on a non-Go thread (no g): handle the few
		// cases we can, then fall through to badsignal.
		if sig == _SIGPROF {
			// Some platforms (Linux) have per-thread timers, which we use in
			// combination with the process-wide timer. Avoid double-counting.
			if validSIGPROF(nil, c) {
				sigprofNonGoPC(c.sigpc())
			}
			return
		}
		if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
			// This is probably a signal from preemptM sent
			// while executing Go code but received while
			// executing non-Go code.
			// We got past sigfwdgo, so we know that there is
			// no non-Go signal handler for sigPreempt.
			// The default behavior for sigPreempt is to ignore
			// the signal, so badsignal will be a no-op anyway.
			if GOOS == "darwin" || GOOS == "ios" {
				atomic.Xadd(&pendingPreemptSignals, -1)
			}
			return
		}
		c.fixsigcode(sig)
		badsignal(uintptr(sig), c)
		return
	}

	setg(g.m.gsignal)

	// If some non-Go code called sigaltstack, adjust.
	var gsignalStack gsignalStack
	setStack := adjustSignalStack(sig, g.m, &gsignalStack)
	if setStack {
		g.m.gsignal.stktopsp = getcallersp()
	}

	if g.stackguard0 == stackFork {
		signalDuringFork(sig)
	}

	c.fixsigcode(sig)
	sighandler(sig, info, ctx, g)
	setg(g)
	if setStack {
		restoreGsignalStack(&gsignalStack)
	}
}

// If the signal handler receives a SIGPROF signal on a non-Go thread,
// it tries to collect a traceback into sigprofCallers.
// sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
var sigprofCallers cgoCallers
var sigprofCallersUse uint32

// sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
// and the signal handler collected a stack trace in sigprofCallers.
// When this is called, sigprofCallersUse will be non-zero.
// g is nil, and what we can do is very limited.
//
// It is called from the signal handling functions written in assembly code that
// are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
// not verified that the SIGPROF delivery corresponds to the best available
// profiling source for this thread.
//
//go:nosplit
//go:nowritebarrierrec
func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
	if prof.hz != 0 {
		c := &sigctxt{info, ctx}
		// Some platforms (Linux) have per-thread timers, which we use in
		// combination with the process-wide timer. Avoid double-counting.
		if validSIGPROF(nil, c) {
			// Find the length of the collected traceback (zero-terminated).
			n := 0
			for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
				n++
			}
			cpuprof.addNonGo(sigprofCallers[:n])
		}
	}

	// Release sigprofCallers for reuse.
	atomic.Store(&sigprofCallersUse, 0)
}

// sigprofNonGoPC is called when a profiling signal arrived on a
// non-Go thread and we have a single PC value, not a stack trace.
// g is nil, and what we can do is very limited.
//go:nosplit
//go:nowritebarrierrec
func sigprofNonGoPC(pc uintptr) {
	if prof.hz != 0 {
		stk := []uintptr{
			pc,
			abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
		}
		cpuprof.addNonGo(stk)
	}
}

// adjustSignalStack adjusts the current stack guard based on the
// stack pointer that is actually in use while handling a signal.
// We do this in case some non-Go code called sigaltstack.
// This reports whether the stack was adjusted, and if so stores the old
// signal stack in *gsigstack.
//go:nosplit
func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
	// The address of the sig argument serves as a proxy for the
	// current stack pointer.
	sp := uintptr(unsafe.Pointer(&sig))
	if sp >= mp.gsignal.stack.lo && sp < mp.gsignal.stack.hi {
		return false
	}

	var st stackt
	sigaltstack(nil, &st)
	stsp := uintptr(unsafe.Pointer(st.ss_sp))
	if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
		setGsignalStack(&st, gsigStack)
		return true
	}

	if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
		// The signal was delivered on the g0 stack.
		// This can happen when linked with C code
		// using the thread sanitizer, which collects
		// signals then delivers them itself by calling
		// the signal handler directly when C code,
		// including C code called via cgo, calls a
		// TSAN-intercepted function such as malloc.
		//
		// We check this condition last as g0.stack.lo
		// may be not very accurate (see mstart).
		st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
		setSignalstackSP(&st, mp.g0.stack.lo)
		setGsignalStack(&st, gsigStack)
		return true
	}

	// sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
	setg(nil)
	needm()
	if st.ss_flags&_SS_DISABLE != 0 {
		noSignalStack(sig)
	} else {
		sigNotOnStack(sig)
	}
	dropm()
	return false
}

// crashing is the number of m's we have waited for when implementing
// GOTRACEBACK=crash when a signal is received.
var crashing int32

// testSigtrap and testSigusr1 are used by the runtime tests. If
// non-nil, it is called on SIGTRAP/SIGUSR1. If it returns true, the
// normal behavior on this signal is suppressed.
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
var testSigusr1 func(gp *g) bool

// sighandler is invoked when a signal occurs. The global g will be
// set to a gsignal goroutine and we will be running on the alternate
// signal stack. The parameter g will be the value of the global g
// when the signal occurred. The sig, info, and ctxt parameters are
// from the system signal handler: they are the parameters passed when
// the SA is passed to the sigaction system call.
//
// The garbage collector may have stopped the world, so write barriers
// are not allowed.
//
//go:nowritebarrierrec
func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
	_g_ := getg()
	c := &sigctxt{info, ctxt}

	if sig == _SIGPROF {
		mp := _g_.m
		// Some platforms (Linux) have per-thread timers, which we use in
		// combination with the process-wide timer. Avoid double-counting.
		if validSIGPROF(mp, c) {
			sigprof(c.sigpc(), c.sigsp(), c.siglr(), gp, mp)
		}
		return
	}

	if sig == _SIGTRAP && testSigtrap != nil && testSigtrap(info, (*sigctxt)(noescape(unsafe.Pointer(c))), gp) {
		return
	}

	if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
		return
	}

	if sig == sigPreempt && debug.asyncpreemptoff == 0 {
		// Might be a preemption signal.
		doSigPreempt(gp, c)
		// Even if this was definitely a preemption signal, it
		// may have been coalesced with another signal, so we
		// still let it through to the application.
	}

	flags := int32(_SigThrow)
	if sig < uint32(len(sigtable)) {
		flags = sigtable[sig].flags
	}
	if c.sigcode() != _SI_USER && flags&_SigPanic != 0 && gp.throwsplit {
		// We can't safely sigpanic because it may grow the
		// stack. Abort in the signal handler instead.
		flags = _SigThrow
	}
	if isAbortPC(c.sigpc()) {
		// On many architectures, the abort function just
		// causes a memory fault. Don't turn that into a panic.
		flags = _SigThrow
	}
	if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
		// The signal is going to cause a panic.
		// Arrange the stack so that it looks like the point
		// where the signal occurred made a call to the
		// function sigpanic. Then set the PC to sigpanic.

		// Have to pass arguments out of band since
		// augmenting the stack frame would break
		// the unwinding code.
		gp.sig = sig
		gp.sigcode0 = uintptr(c.sigcode())
		gp.sigcode1 = uintptr(c.fault())
		gp.sigpc = c.sigpc()

		c.preparePanic(sig, gp)
		return
	}

	if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
		if sigsend(sig) {
			return
		}
	}

	if c.sigcode() == _SI_USER && signal_ignored(sig) {
		return
	}

	if flags&_SigKill != 0 {
		dieFromSignal(sig)
	}

	// _SigThrow means that we should exit now.
	// If we get here with _SigPanic, it means that the signal
	// was sent to us by a program (c.sigcode() == _SI_USER);
	// in that case, if we didn't handle it in sigsend, we exit now.
	if flags&(_SigThrow|_SigPanic) == 0 {
		return
	}

	_g_.m.throwing = 1
	_g_.m.caughtsig.set(gp)

	if crashing == 0 {
		startpanic_m()
	}

	if sig < uint32(len(sigtable)) {
		print(sigtable[sig].name, "\n")
	} else {
		print("Signal ", sig, "\n")
	}

	print("PC=", hex(c.sigpc()), " m=", _g_.m.id, " sigcode=", c.sigcode(), "\n")
	if _g_.m.incgo && gp == _g_.m.g0 && _g_.m.curg != nil {
		print("signal arrived during cgo execution\n")
		// Switch to curg so that we get a traceback of the Go code
		// leading up to the cgocall, which switched from curg to g0.
		gp = _g_.m.curg
	}
	if sig == _SIGILL || sig == _SIGFPE {
		// It would be nice to know how long the instruction is.
		// Unfortunately, that's complicated to do in general (mostly for x86
		// and s930x, but other archs have non-standard instruction lengths also).
		// Opt to print 16 bytes, which covers most instructions.
		const maxN = 16
		n := uintptr(maxN)
		// We have to be careful, though. If we're near the end of
		// a page and the following page isn't mapped, we could
		// segfault. So make sure we don't straddle a page (even though
		// that could lead to printing an incomplete instruction).
		// We're assuming here we can read at least the page containing the PC.
		// I suppose it is possible that the page is mapped executable but not readable?
		pc := c.sigpc()
		if n > physPageSize-pc%physPageSize {
			n = physPageSize - pc%physPageSize
		}
		print("instruction bytes:")
		b := (*[maxN]byte)(unsafe.Pointer(pc))
		for i := uintptr(0); i < n; i++ {
			print(" ", hex(b[i]))
		}
		println()
	}
	print("\n")

	level, _, docrash := gotraceback()
	if level > 0 {
		goroutineheader(gp)
		tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
		if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
			// tracebackothers on original m skipped this one; trace it now.
			goroutineheader(_g_.m.curg)
			traceback(^uintptr(0), ^uintptr(0), 0, _g_.m.curg)
		} else if crashing == 0 {
			tracebackothers(gp)
			print("\n")
		}
		dumpregs(c)
	}

	if docrash {
		crashing++
		if crashing < mcount()-int32(extraMCount) {
			// There are other m's that need to dump their stacks.
			// Relay SIGQUIT to the next m by sending it to the current process.
			// All m's that have already received SIGQUIT have signal masks blocking
			// receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
			// When the last m receives the SIGQUIT, it will fall through to the call to
			// crash below. Just in case the relaying gets botched, each m involved in
			// the relay sleeps for 5 seconds and then does the crash/exit itself.
747 // In expected operation, the last m has received the SIGQUIT and run 748 // crash/exit and the process is gone, all long before any of the 749 // 5-second sleeps have finished. 750 print("\n-----\n\n") 751 raiseproc(_SIGQUIT) 752 usleep(5 * 1000 * 1000) 753 } 754 crash() 755 } 756 757 printDebugLog() 758 759 exit(2) 760 } 761 762 // sigpanic turns a synchronous signal into a run-time panic. 763 // If the signal handler sees a synchronous panic, it arranges the 764 // stack to look like the function where the signal occurred called 765 // sigpanic, sets the signal's PC value to sigpanic, and returns from 766 // the signal handler. The effect is that the program will act as 767 // though the function that got the signal simply called sigpanic 768 // instead. 769 // 770 // This must NOT be nosplit because the linker doesn't know where 771 // sigpanic calls can be injected. 772 // 773 // The signal handler must not inject a call to sigpanic if 774 // getg().throwsplit, since sigpanic may need to grow the stack. 775 // 776 // This is exported via linkname to assembly in runtime/cgo. 777 //go:linkname sigpanic 778 func sigpanic() { 779 g := getg() 780 if !canpanic(g) { 781 throw("unexpected signal during runtime execution") 782 } 783 784 switch g.sig { 785 case _SIGBUS: 786 if g.sigcode0 == _BUS_ADRERR && g.sigcode1 < 0x1000 { 787 panicmem() 788 } 789 // Support runtime/debug.SetPanicOnFault. 790 if g.paniconfault { 791 panicmemAddr(g.sigcode1) 792 } 793 print("unexpected fault address ", hex(g.sigcode1), "\n") 794 throw("fault") 795 case _SIGSEGV: 796 if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 { 797 panicmem() 798 } 799 // Support runtime/debug.SetPanicOnFault. 
800 if g.paniconfault { 801 panicmemAddr(g.sigcode1) 802 } 803 print("unexpected fault address ", hex(g.sigcode1), "\n") 804 throw("fault") 805 case _SIGFPE: 806 switch g.sigcode0 { 807 case _FPE_INTDIV: 808 panicdivide() 809 case _FPE_INTOVF: 810 panicoverflow() 811 } 812 panicfloat() 813 } 814 815 if g.sig >= uint32(len(sigtable)) { 816 // can't happen: we looked up g.sig in sigtable to decide to call sigpanic 817 throw("unexpected signal value") 818 } 819 panic(errorString(sigtable[g.sig].name)) 820 } 821 822 // dieFromSignal kills the program with a signal. 823 // This provides the expected exit status for the shell. 824 // This is only called with fatal signals expected to kill the process. 825 //go:nosplit 826 //go:nowritebarrierrec 827 func dieFromSignal(sig uint32) { 828 unblocksig(sig) 829 // Mark the signal as unhandled to ensure it is forwarded. 830 atomic.Store(&handlingSig[sig], 0) 831 raise(sig) 832 833 // That should have killed us. On some systems, though, raise 834 // sends the signal to the whole process rather than to just 835 // the current thread, which means that the signal may not yet 836 // have been delivered. Give other threads a chance to run and 837 // pick up the signal. 838 osyield() 839 osyield() 840 osyield() 841 842 // If that didn't work, try _SIG_DFL. 843 setsig(sig, _SIG_DFL) 844 raise(sig) 845 846 osyield() 847 osyield() 848 osyield() 849 850 // If we are still somehow running, just exit with the wrong status. 851 exit(2) 852 } 853 854 // raisebadsignal is called when a signal is received on a non-Go 855 // thread, and the Go program does not want to handle it (that is, the 856 // program has not called os/signal.Notify for the signal). 857 func raisebadsignal(sig uint32, c *sigctxt) { 858 if sig == _SIGPROF { 859 // Ignore profiling signals that arrive on non-Go threads. 
860 return 861 } 862 863 var handler uintptr 864 if sig >= _NSIG { 865 handler = _SIG_DFL 866 } else { 867 handler = atomic.Loaduintptr(&fwdSig[sig]) 868 } 869 870 // Reset the signal handler and raise the signal. 871 // We are currently running inside a signal handler, so the 872 // signal is blocked. We need to unblock it before raising the 873 // signal, or the signal we raise will be ignored until we return 874 // from the signal handler. We know that the signal was unblocked 875 // before entering the handler, or else we would not have received 876 // it. That means that we don't have to worry about blocking it 877 // again. 878 unblocksig(sig) 879 setsig(sig, handler) 880 881 // If we're linked into a non-Go program we want to try to 882 // avoid modifying the original context in which the signal 883 // was raised. If the handler is the default, we know it 884 // is non-recoverable, so we don't have to worry about 885 // re-installing sighandler. At this point we can just 886 // return and the signal will be re-raised and caught by 887 // the default handler with the correct context. 888 // 889 // On FreeBSD, the libthr sigaction code prevents 890 // this from working so we fall through to raise. 891 if GOOS != "freebsd" && (isarchive || islibrary) && handler == _SIG_DFL && c.sigcode() != _SI_USER { 892 return 893 } 894 895 raise(sig) 896 897 // Give the signal a chance to be delivered. 898 // In almost all real cases the program is about to crash, 899 // so sleeping here is not a waste of time. 900 usleep(1000) 901 902 // If the signal didn't cause the program to exit, restore the 903 // Go signal handler and carry on. 904 // 905 // We may receive another instance of the signal before we 906 // restore the Go handler, but that is not so bad: we know 907 // that the Go program has been ignoring the signal. 
908 setsig(sig, abi.FuncPCABIInternal(sighandler)) 909 } 910 911 //go:nosplit 912 func crash() { 913 // OS X core dumps are linear dumps of the mapped memory, 914 // from the first virtual byte to the last, with zeros in the gaps. 915 // Because of the way we arrange the address space on 64-bit systems, 916 // this means the OS X core file will be >128 GB and even on a zippy 917 // workstation can take OS X well over an hour to write (uninterruptible). 918 // Save users from making that mistake. 919 if GOOS == "darwin" && GOARCH == "amd64" { 920 return 921 } 922 923 dieFromSignal(_SIGABRT) 924 } 925 926 // ensureSigM starts one global, sleeping thread to make sure at least one thread 927 // is available to catch signals enabled for os/signal. 928 func ensureSigM() { 929 if maskUpdatedChan != nil { 930 return 931 } 932 maskUpdatedChan = make(chan struct{}) 933 disableSigChan = make(chan uint32) 934 enableSigChan = make(chan uint32) 935 go func() { 936 // Signal masks are per-thread, so make sure this goroutine stays on one 937 // thread. 938 LockOSThread() 939 defer UnlockOSThread() 940 // The sigBlocked mask contains the signals not active for os/signal, 941 // initially all signals except the essential. When signal.Notify()/Stop is called, 942 // sigenable/sigdisable in turn notify this thread to update its signal 943 // mask accordingly. 944 sigBlocked := sigset_all 945 for i := range sigtable { 946 if !blockableSig(uint32(i)) { 947 sigdelset(&sigBlocked, i) 948 } 949 } 950 sigprocmask(_SIG_SETMASK, &sigBlocked, nil) 951 for { 952 select { 953 case sig := <-enableSigChan: 954 if sig > 0 { 955 sigdelset(&sigBlocked, int(sig)) 956 } 957 case sig := <-disableSigChan: 958 if sig > 0 && blockableSig(sig) { 959 sigaddset(&sigBlocked, int(sig)) 960 } 961 } 962 sigprocmask(_SIG_SETMASK, &sigBlocked, nil) 963 maskUpdatedChan <- struct{}{} 964 } 965 }() 966 } 967 968 // This is called when we receive a signal when there is no signal stack. 
// This can only happen if non-Go code calls sigaltstack to disable the
// signal stack.
func noSignalStack(sig uint32) {
	println("signal", sig, "received on thread with no signal stack")
	throw("non-Go code disabled sigaltstack")
}

// This is called if we receive a signal when there is a signal stack
// but we are not on it. This can only happen if non-Go code called
// sigaction without setting the SS_ONSTACK flag.
func sigNotOnStack(sig uint32) {
	println("signal", sig, "received but handler not on signal stack")
	throw("non-Go code set up signal handler without SA_ONSTACK flag")
}

// signalDuringFork is called if we receive a signal while doing a fork.
// We do not want signals at that time, as a signal sent to the process
// group may be delivered to the child process, causing confusion.
// This should never be called, because we block signals across the fork;
// this function is just a safety check. See issue 18600 for background.
func signalDuringFork(sig uint32) {
	println("signal", sig, "received during fork")
	throw("signal received during fork")
}

var badginsignalMsg = "fatal: bad g in signal handler\n"

// This runs on a foreign stack, without an m or a g. No stack split.
//go:nosplit
//go:norace
//go:nowritebarrierrec
func badsignal(sig uintptr, c *sigctxt) {
	if !iscgo && !cgoHasExtraM {
		// There is no extra M. needm will not be able to grab
		// an M. Instead of hanging, just crash.
		// Cannot call split-stack function as there is no G.
		s := stringStructOf(&badginsignalMsg)
		write(2, s.str, int32(s.len))
		exit(2)
		// exit is not expected to return; force a hard fault if it does.
		*(*uintptr)(unsafe.Pointer(uintptr(123))) = 2
	}
	needm()
	if !sigsend(uint32(sig)) {
		// A foreign thread received the signal sig, and the
		// Go code does not want to handle it.
		raisebadsignal(uint32(sig), c)
	}
	dropm()
}

// sigfwd invokes fn, a pre-Go signal handler, with the given signal
// arguments. Implemented in assembly.
//go:noescape
func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)

// Determines if the signal should be handled by Go and if not, forwards the
// signal to the handler that was installed before Go's. Returns whether the
// signal was forwarded.
// This is called by the signal handler, and the world may be stopped.
//go:nosplit
//go:nowritebarrierrec
func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
	if sig >= uint32(len(sigtable)) {
		return false
	}
	fwdFn := atomic.Loaduintptr(&fwdSig[sig])
	flags := sigtable[sig].flags

	// If we aren't handling the signal, forward it.
	if atomic.Load(&handlingSig[sig]) == 0 || !signalsOK {
		// If the signal is ignored, doing nothing is the same as forwarding.
		if fwdFn == _SIG_IGN || (fwdFn == _SIG_DFL && flags&_SigIgn != 0) {
			return true
		}
		// We are not handling the signal and there is no other handler to forward to.
		// Crash with the default behavior.
		if fwdFn == _SIG_DFL {
			setsig(sig, _SIG_DFL)
			dieFromSignal(sig)
			return false
		}

		sigfwd(fwdFn, sig, info, ctx)
		return true
	}

	// This function and its caller sigtrampgo assumes SIGPIPE is delivered on the
	// originating thread. This property does not hold on macOS (golang.org/issue/33384),
	// so we have no choice but to ignore SIGPIPE.
	if (GOOS == "darwin" || GOOS == "ios") && sig == _SIGPIPE {
		return true
	}

	// If there is no handler to forward to, no need to forward.
	if fwdFn == _SIG_DFL {
		return false
	}

	c := &sigctxt{info, ctx}
	// Only forward synchronous signals and SIGPIPE.
	//
	// Unfortunately, user generated SIGPIPEs will also be forwarded, because si_code
	// is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
	// or pipe.
	if (c.sigcode() == _SI_USER || flags&_SigPanic == 0) && sig != _SIGPIPE {
		return false
	}
	// Determine if the signal occurred inside Go code. We test that:
	//   (1) we weren't in VDSO page,
	//   (2) we were in a goroutine (i.e., m.curg != nil), and
	//   (3) we weren't in CGO.
	g := sigFetchG(c)
	if g != nil && g.m != nil && g.m.curg != nil && !g.m.incgo {
		return false
	}

	// Signal not handled by Go, forward it.
	if fwdFn != _SIG_IGN {
		sigfwd(fwdFn, sig, info, ctx)
	}

	return true
}

// sigsave saves the current thread's signal mask into *p.
// This is used to preserve the non-Go signal mask when a non-Go
// thread calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//go:nosplit
//go:nowritebarrierrec
func sigsave(p *sigset) {
	sigprocmask(_SIG_SETMASK, nil, p)
}

// msigrestore sets the current thread's signal mask to sigmask.
// This is used to restore the non-Go signal mask when a non-Go thread
// calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by dropm
// after g has been cleared.
//go:nosplit
//go:nowritebarrierrec
func msigrestore(sigmask sigset) {
	sigprocmask(_SIG_SETMASK, &sigmask, nil)
}

// sigsetAllExiting is used by sigblock(true) when a thread is
// exiting. sigset_all is defined in OS specific code, and per GOOS
// behavior may override this default for sigsetAllExiting: see
// osinit().
var sigsetAllExiting = sigset_all

// sigblock blocks signals in the current thread's signal mask.
1119 // This is used to block signals while setting up and tearing down g 1120 // when a non-Go thread calls a Go function. When a thread is exiting 1121 // we use the sigsetAllExiting value, otherwise the OS specific 1122 // definition of sigset_all is used. 1123 // This is nosplit and nowritebarrierrec because it is called by needm 1124 // which may be called on a non-Go thread with no g available. 1125 //go:nosplit 1126 //go:nowritebarrierrec 1127 func sigblock(exiting bool) { 1128 if exiting { 1129 sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil) 1130 return 1131 } 1132 sigprocmask(_SIG_SETMASK, &sigset_all, nil) 1133 } 1134 1135 // unblocksig removes sig from the current thread's signal mask. 1136 // This is nosplit and nowritebarrierrec because it is called from 1137 // dieFromSignal, which can be called by sigfwdgo while running in the 1138 // signal handler, on the signal stack, with no g available. 1139 //go:nosplit 1140 //go:nowritebarrierrec 1141 func unblocksig(sig uint32) { 1142 var set sigset 1143 sigaddset(&set, int(sig)) 1144 sigprocmask(_SIG_UNBLOCK, &set, nil) 1145 } 1146 1147 // minitSignals is called when initializing a new m to set the 1148 // thread's alternate signal stack and signal mask. 1149 func minitSignals() { 1150 minitSignalStack() 1151 minitSignalMask() 1152 } 1153 1154 // minitSignalStack is called when initializing a new m to set the 1155 // alternate signal stack. If the alternate signal stack is not set 1156 // for the thread (the normal case) then set the alternate signal 1157 // stack to the gsignal stack. If the alternate signal stack is set 1158 // for the thread (the case when a non-Go thread sets the alternate 1159 // signal stack and then calls a Go function) then set the gsignal 1160 // stack to the alternate signal stack. We also set the alternate 1161 // signal stack to the gsignal stack if cgo is not used (regardless 1162 // of whether it is already set). 
Record which choice was made in 1163 // newSigstack, so that it can be undone in unminit. 1164 func minitSignalStack() { 1165 _g_ := getg() 1166 var st stackt 1167 sigaltstack(nil, &st) 1168 if st.ss_flags&_SS_DISABLE != 0 || !iscgo { 1169 signalstack(&_g_.m.gsignal.stack) 1170 _g_.m.newSigstack = true 1171 } else { 1172 setGsignalStack(&st, &_g_.m.goSigStack) 1173 _g_.m.newSigstack = false 1174 } 1175 } 1176 1177 // minitSignalMask is called when initializing a new m to set the 1178 // thread's signal mask. When this is called all signals have been 1179 // blocked for the thread. This starts with m.sigmask, which was set 1180 // either from initSigmask for a newly created thread or by calling 1181 // sigsave if this is a non-Go thread calling a Go function. It 1182 // removes all essential signals from the mask, thus causing those 1183 // signals to not be blocked. Then it sets the thread's signal mask. 1184 // After this is called the thread can receive signals. 1185 func minitSignalMask() { 1186 nmask := getg().m.sigmask 1187 for i := range sigtable { 1188 if !blockableSig(uint32(i)) { 1189 sigdelset(&nmask, i) 1190 } 1191 } 1192 sigprocmask(_SIG_SETMASK, &nmask, nil) 1193 } 1194 1195 // unminitSignals is called from dropm, via unminit, to undo the 1196 // effect of calling minit on a non-Go thread. 1197 //go:nosplit 1198 func unminitSignals() { 1199 if getg().m.newSigstack { 1200 st := stackt{ss_flags: _SS_DISABLE} 1201 sigaltstack(&st, nil) 1202 } else { 1203 // We got the signal stack from someone else. Restore 1204 // the Go-allocated stack in case this M gets reused 1205 // for another thread (e.g., it's an extram). Also, on 1206 // Android, libc allocates a signal stack for all 1207 // threads, so it's important to restore the Go stack 1208 // even on Go-created threads so we can free it. 1209 restoreGsignalStack(&getg().m.goSigStack) 1210 } 1211 } 1212 1213 // blockableSig reports whether sig may be blocked by the signal mask. 
1214 // We never want to block the signals marked _SigUnblock; 1215 // these are the synchronous signals that turn into a Go panic. 1216 // In a Go program--not a c-archive/c-shared--we never want to block 1217 // the signals marked _SigKill or _SigThrow, as otherwise it's possible 1218 // for all running threads to block them and delay their delivery until 1219 // we start a new thread. When linked into a C program we let the C code 1220 // decide on the disposition of those signals. 1221 func blockableSig(sig uint32) bool { 1222 flags := sigtable[sig].flags 1223 if flags&_SigUnblock != 0 { 1224 return false 1225 } 1226 if isarchive || islibrary { 1227 return true 1228 } 1229 return flags&(_SigKill|_SigThrow) == 0 1230 } 1231 1232 // gsignalStack saves the fields of the gsignal stack changed by 1233 // setGsignalStack. 1234 type gsignalStack struct { 1235 stack stack 1236 stackguard0 uintptr 1237 stackguard1 uintptr 1238 stktopsp uintptr 1239 } 1240 1241 // setGsignalStack sets the gsignal stack of the current m to an 1242 // alternate signal stack returned from the sigaltstack system call. 1243 // It saves the old values in *old for use by restoreGsignalStack. 1244 // This is used when handling a signal if non-Go code has set the 1245 // alternate signal stack. 1246 //go:nosplit 1247 //go:nowritebarrierrec 1248 func setGsignalStack(st *stackt, old *gsignalStack) { 1249 g := getg() 1250 if old != nil { 1251 old.stack = g.m.gsignal.stack 1252 old.stackguard0 = g.m.gsignal.stackguard0 1253 old.stackguard1 = g.m.gsignal.stackguard1 1254 old.stktopsp = g.m.gsignal.stktopsp 1255 } 1256 stsp := uintptr(unsafe.Pointer(st.ss_sp)) 1257 g.m.gsignal.stack.lo = stsp 1258 g.m.gsignal.stack.hi = stsp + st.ss_size 1259 g.m.gsignal.stackguard0 = stsp + _StackGuard 1260 g.m.gsignal.stackguard1 = stsp + _StackGuard 1261 } 1262 1263 // restoreGsignalStack restores the gsignal stack to the value it had 1264 // before entering the signal handler. 
1265 //go:nosplit 1266 //go:nowritebarrierrec 1267 func restoreGsignalStack(st *gsignalStack) { 1268 gp := getg().m.gsignal 1269 gp.stack = st.stack 1270 gp.stackguard0 = st.stackguard0 1271 gp.stackguard1 = st.stackguard1 1272 gp.stktopsp = st.stktopsp 1273 } 1274 1275 // signalstack sets the current thread's alternate signal stack to s. 1276 //go:nosplit 1277 func signalstack(s *stack) { 1278 st := stackt{ss_size: s.hi - s.lo} 1279 setSignalstackSP(&st, s.lo) 1280 sigaltstack(&st, nil) 1281 } 1282 1283 // setsigsegv is used on darwin/arm64 to fake a segmentation fault. 1284 // 1285 // This is exported via linkname to assembly in runtime/cgo. 1286 // 1287 //go:nosplit 1288 //go:linkname setsigsegv 1289 func setsigsegv(pc uintptr) { 1290 g := getg() 1291 g.sig = _SIGSEGV 1292 g.sigpc = pc 1293 g.sigcode0 = _SEGV_MAPERR 1294 g.sigcode1 = 0 // TODO: emulate si_addr 1295 }