// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64 || arm64

package runtime

import "unsafe"

// Reasons returned by debugCallCheck explaining why a debugger call
// injection is not safe at the current PC.
const (
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
)

// debugCallV2 and debugCallPanicked are bodyless declarations: they are
// implemented outside Go (in per-arch assembly) and form the entry points
// of the debugger call-injection protocol.
func debugCallV2()
func debugCallPanicked(val any)

// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	}
	if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		// systemstack.)
		return debugCallSystemStack
	}

	// Switch to the system stack to avoid overflowing the user
	// stack.
	var ret string
	systemstack(func() {
		f := findfunc(pc)
		if !f.valid() {
			ret = debugCallUnknownFunc
			return
		}

		name := funcname(f)

		switch name {
		case "debugCall32",
			"debugCall64",
			"debugCall128",
			"debugCall256",
			"debugCall512",
			"debugCall1024",
			"debugCall2048",
			"debugCall4096",
			"debugCall8192",
			"debugCall16384",
			"debugCall32768",
			"debugCall65536":
			// These functions are allowed so that the debugger can initiate multiple function calls.
			// See: https://golang.org/cl/161137/
			return
		}

		// Disallow calls from the runtime. We could
		// potentially make this condition tighter (e.g., not
		// when locks are held), but there are enough tightly
		// coded sequences (e.g., defer handling) that it's
		// better to play it safe.
		if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
			ret = debugCallRuntime
			return
		}

		// Check that this isn't an unsafe-point.
		if pc != f.entry() {
			// pc is a return PC; step back into the call
			// instruction so we look up the PCDATA for the
			// call site itself.
			pc--
		}
		up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
		if up != _PCDATA_UnsafePointSafe {
			// Not at a safe point.
			ret = debugCallUnsafePoint
		}
	})
	return ret
}

// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV2.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
	var lockedm bool
	var lockedExt uint32
	callerpc := getcallerpc()
	gp := getg()

	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	systemstack(func() {
		// TODO(mknyszek): It would be nice to wrap these arguments in an allocated
		// closure and start the goroutine with that closure, but the compiler disallows
		// implicit closure allocation in the runtime.
		fn := debugCallWrap1
		newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
		args := &debugCallWrapArgs{
			dispatch: dispatch,
			callingG: gp,
		}
		// Pass the arguments to debugCallWrap1 through the
		// goroutine's param field.
		newg.param = unsafe.Pointer(args)

		// If the current G is locked, then transfer that
		// locked-ness to the new goroutine.
		if gp.lockedm != 0 {
			// Save lock state to restore later.
			mp := gp.m
			if mp != gp.lockedm.ptr() {
				throw("inconsistent lockedm")
			}

			lockedm = true
			lockedExt = mp.lockedExt

			// Transfer external lock count to internal so
			// it can't be unlocked from the debug call.
			mp.lockedInt++
			mp.lockedExt = 0

			mp.lockedg.set(newg)
			newg.lockedm.set(mp)
			gp.lockedm = 0
		}

		// Mark the calling goroutine as being at an async
		// safe-point, since it has a few conservative frames
		// at the bottom of the stack. This also prevents
		// stack shrinks.
		gp.asyncSafePoint = true

		// Stash newg away so we can execute it below (mcall's
		// closure can't capture anything).
		gp.schedlink.set(newg)
	})

	// Switch to the new goroutine.
	mcall(func(gp *g) {
		// Get newg.
		newg := gp.schedlink.ptr()
		gp.schedlink = 0

		// Park the calling goroutine.
		if trace.enabled {
			traceGoPark(traceEvGoBlock, 1)
		}
		casGToWaiting(gp, _Grunning, waitReasonDebugCall)
		dropg()

		// Directly execute the new goroutine. The debug
		// protocol will continue on the new goroutine, so
		// it's important we not just let the scheduler do
		// this or it may resume a different goroutine.
		execute(newg, true)
	})

	// We'll resume here when the call returns.

	// Restore locked state.
	if lockedm {
		mp := gp.m
		mp.lockedExt = lockedExt
		mp.lockedInt--
		mp.lockedg.set(gp)
		gp.lockedm.set(mp)
	}

	gp.asyncSafePoint = false
}

// debugCallWrapArgs carries the dispatch PC and the calling goroutine
// from debugCallWrap to debugCallWrap1 via the new goroutine's param
// field.
type debugCallWrapArgs struct {
	dispatch uintptr
	callingG *g
}

// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine.
func debugCallWrap1() {
	gp := getg()
	args := (*debugCallWrapArgs)(gp.param)
	dispatch, callingG := args.dispatch, args.callingG
	gp.param = nil

	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)

	// Resume the caller goroutine.
	getg().schedlink.set(callingG)
	mcall(func(gp *g) {
		callingG := gp.schedlink.ptr()
		gp.schedlink = 0

		// Unlock this goroutine from the M if necessary. The
		// calling G will relock.
		if gp.lockedm != 0 {
			gp.lockedm = 0
			gp.m.lockedg = 0
		}

		// Switch back to the calling goroutine. At some point
		// the scheduler will schedule us again and we'll
		// finish exiting.
		if trace.enabled {
			traceGoSched()
		}
		casgstatus(gp, _Grunning, _Grunnable)
		dropg()
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)

		if trace.enabled {
			traceGoUnpark(callingG, 0)
		}
		casgstatus(callingG, _Gwaiting, _Grunnable)
		execute(callingG, true)
	})
}

// debugCallWrap2 calls the dispatch function whose code pointer is
// dispatch, trapping any panic it raises and reporting it to the
// debugger via debugCallPanicked.
func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
	// Build a func() value directly from the raw code pointer:
	// a Go func value is a pointer to a funcval.
	var dispatchF func()
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

	// ok distinguishes a normal return from an unwinding panic:
	// it is only set after dispatchF returns, so the deferred
	// function reports the recovered value only on panic.
	var ok bool
	defer func() {
		if !ok {
			err := recover()
			debugCallPanicked(err)
		}
	}()
	dispatchF()
	ok = true
}