// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64

package runtime

import "unsafe"

// Reasons debugCallCheck may refuse a debugger-injected call. The
// debugger reads these strings back, so their exact text is part of
// the debug call protocol — do not change them.
const (
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
)

// debugCallV2 and debugCallPanicked are implemented in assembly
// (bodies not visible in this file).
func debugCallV2()
func debugCallPanicked(val any)

// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	}
	if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		// systemstack.)
		return debugCallSystemStack
	}

	// Switch to the system stack to avoid overflowing the user
	// stack.
	var ret string
	systemstack(func() {
		f := findfunc(pc)
		if !f.valid() {
			ret = debugCallUnknownFunc
			return
		}

		name := funcname(f)

		switch name {
		case "debugCall32",
			"debugCall64",
			"debugCall128",
			"debugCall256",
			"debugCall512",
			"debugCall1024",
			"debugCall2048",
			"debugCall4096",
			"debugCall8192",
			"debugCall16384",
			"debugCall32768",
			"debugCall65536":
			// These functions are allowed so that the debugger can initiate multiple function calls.
			// See: https://golang.org/cl/161137/
			return
		}

		// Disallow calls from the runtime. We could
		// potentially make this condition tighter (e.g., not
		// when locks are held), but there are enough tightly
		// coded sequences (e.g., defer handling) that it's
		// better to play it safe.
		if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
			ret = debugCallRuntime
			return
		}

		// Check that this isn't an unsafe-point.
		if pc != f.entry() {
			// pc is a return PC; step back into the call
			// instruction so we query the PCDATA of the call
			// site itself.
			pc--
		}
		up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
		if up != _PCDATA_UnsafePointSafe {
			// Not at a safe point.
			ret = debugCallUnsafePoint
		}
	})
	return ret
}

// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV2.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
	var lockedm bool
	var lockedExt uint32
	callerpc := getcallerpc()
	gp := getg()

	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	systemstack(func() {
		// TODO(mknyszek): It would be nice to wrap these arguments in an allocated
		// closure and start the goroutine with that closure, but the compiler disallows
		// implicit closure allocation in the runtime.
		fn := debugCallWrap1
		newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
		args := &debugCallWrapArgs{
			dispatch: dispatch,
			callingG: gp,
		}
		// newg retrieves args via gp.param in debugCallWrap1.
		newg.param = unsafe.Pointer(args)

		// If the current G is locked, then transfer that
		// locked-ness to the new goroutine.
		if gp.lockedm != 0 {
			// Save lock state to restore later.
			mp := gp.m
			if mp != gp.lockedm.ptr() {
				throw("inconsistent lockedm")
			}

			lockedm = true
			lockedExt = mp.lockedExt

			// Transfer external lock count to internal so
			// it can't be unlocked from the debug call.
			mp.lockedInt++
			mp.lockedExt = 0

			mp.lockedg.set(newg)
			newg.lockedm.set(mp)
			gp.lockedm = 0
		}

		// Mark the calling goroutine as being at an async
		// safe-point, since it has a few conservative frames
		// at the bottom of the stack. This also prevents
		// stack shrinks.
		gp.asyncSafePoint = true

		// Stash newg away so we can execute it below (mcall's
		// closure can't capture anything).
		gp.schedlink.set(newg)
	})

	// Switch to the new goroutine.
	mcall(func(gp *g) {
		// Get newg.
		newg := gp.schedlink.ptr()
		gp.schedlink = 0

		// Park the calling goroutine.
		gp.waitreason = waitReasonDebugCall
		if trace.enabled {
			traceGoPark(traceEvGoBlock, 1)
		}
		casgstatus(gp, _Grunning, _Gwaiting)
		dropg()

		// Directly execute the new goroutine. The debug
		// protocol will continue on the new goroutine, so
		// it's important we not just let the scheduler do
		// this or it may resume a different goroutine.
		execute(newg, true)
	})

	// We'll resume here when the call returns.

	// Restore locked state.
	if lockedm {
		mp := gp.m
		mp.lockedExt = lockedExt
		mp.lockedInt--
		mp.lockedg.set(gp)
		gp.lockedm.set(mp)
	}

	gp.asyncSafePoint = false
}

// debugCallWrapArgs carries the dispatch PC and the parked calling
// goroutine from debugCallWrap to debugCallWrap1 (passed via
// newg.param).
type debugCallWrapArgs struct {
	dispatch uintptr
	callingG *g
}

// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine.
func debugCallWrap1() {
	gp := getg()
	args := (*debugCallWrapArgs)(gp.param)
	dispatch, callingG := args.dispatch, args.callingG
	gp.param = nil

	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)

	// Resume the caller goroutine.
	getg().schedlink.set(callingG)
	mcall(func(gp *g) {
		callingG := gp.schedlink.ptr()
		gp.schedlink = 0

		// Unlock this goroutine from the M if necessary. The
		// calling G will relock.
		if gp.lockedm != 0 {
			gp.lockedm = 0
			gp.m.lockedg = 0
		}

		// Switch back to the calling goroutine. At some point
		// the scheduler will schedule us again and we'll
		// finish exiting.
		if trace.enabled {
			traceGoSched()
		}
		casgstatus(gp, _Grunning, _Grunnable)
		dropg()
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)

		if trace.enabled {
			traceGoUnpark(callingG, 0)
		}
		casgstatus(callingG, _Gwaiting, _Grunnable)
		execute(callingG, true)
	})
}

// debugCallWrap2 calls the dispatch function at PC dispatch and, if it
// panics, reports the recovered value to the debugger via
// debugCallPanicked instead of letting the panic propagate.
func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
	var dispatchF func()
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

	var ok bool
	defer func() {
		if !ok {
			// ok is still false, so dispatchF must have
			// panicked; hand the recovered value to the
			// debugger.
			err := recover()
			debugCallPanicked(err)
		}
	}()
	dispatchF()
	ok = true
}