// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64

package runtime

import "unsafe"

// Reasons returned by debugCallCheck explaining why a debugger call
// injection is not permitted at the current PC.
const (
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
)

// debugCallV1 and debugCallPanicked are declared without bodies here;
// they are implemented in assembly (this file is amd64-only per the
// build tag above).
func debugCallV1()
func debugCallPanicked(val interface{})

// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why.
//
// Must be nosplit: it is called by the debugger's injected code before
// the stack is in a state where a stack split is safe.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	}
	if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		// systemstack.)
		return debugCallSystemStack
	}

	// Switch to the system stack to avoid overflowing the user
	// stack.
	var ret string
	systemstack(func() {
		f := findfunc(pc)
		if !f.valid() {
			ret = debugCallUnknownFunc
			return
		}

		name := funcname(f)

		switch name {
		case "debugCall32",
			"debugCall64",
			"debugCall128",
			"debugCall256",
			"debugCall512",
			"debugCall1024",
			"debugCall2048",
			"debugCall4096",
			"debugCall8192",
			"debugCall16384",
			"debugCall32768",
			"debugCall65536":
			// These functions are allowed so that the debugger can initiate multiple function calls.
			// See: https://golang.org/cl/161137/
			return
		}

		// Disallow calls from the runtime. We could
		// potentially make this condition tighter (e.g., not
		// when locks are held), but there are enough tightly
		// coded sequences (e.g., defer handling) that it's
		// better to play it safe.
		if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
			ret = debugCallRuntime
			return
		}

		// Check that this isn't an unsafe-point.
		if pc != f.entry {
			// pc is a return PC; back up one byte so the
			// PCDATA lookup lands inside the call instruction.
			pc--
		}
		up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
		if up != _PCDATA_UnsafePointSafe {
			// Not at a safe point.
			ret = debugCallUnsafePoint
		}
	})
	return ret
}

// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV1.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
	var lockedm bool
	var lockedExt uint32
	callerpc := getcallerpc()
	gp := getg()

	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	systemstack(func() {
		// args mirrors the parameter layout of debugCallWrap1;
		// it is copied onto the new goroutine's stack by newproc1.
		var args struct {
			dispatch uintptr
			callingG *g
		}
		args.dispatch = dispatch
		args.callingG = gp
		fn := debugCallWrap1
		newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), unsafe.Pointer(&args), int32(unsafe.Sizeof(args)), gp, callerpc)

		// If the current G is locked, then transfer that
		// locked-ness to the new goroutine.
		if gp.lockedm != 0 {
			// Save lock state to restore later.
			mp := gp.m
			if mp != gp.lockedm.ptr() {
				throw("inconsistent lockedm")
			}

			lockedm = true
			lockedExt = mp.lockedExt

			// Transfer external lock count to internal so
			// it can't be unlocked from the debug call.
			mp.lockedInt++
			mp.lockedExt = 0

			mp.lockedg.set(newg)
			newg.lockedm.set(mp)
			gp.lockedm = 0
		}

		// Mark the calling goroutine as being at an async
		// safe-point, since it has a few conservative frames
		// at the bottom of the stack. This also prevents
		// stack shrinks.
		gp.asyncSafePoint = true

		// Stash newg away so we can execute it below (mcall's
		// closure can't capture anything).
		gp.schedlink.set(newg)
	})

	// Switch to the new goroutine.
	mcall(func(gp *g) {
		// Get newg.
		newg := gp.schedlink.ptr()
		gp.schedlink = 0

		// Park the calling goroutine.
		gp.waitreason = waitReasonDebugCall
		if trace.enabled {
			traceGoPark(traceEvGoBlock, 1)
		}
		casgstatus(gp, _Grunning, _Gwaiting)
		dropg()

		// Directly execute the new goroutine. The debug
		// protocol will continue on the new goroutine, so
		// it's important we not just let the scheduler do
		// this or it may resume a different goroutine.
		execute(newg, true)
	})

	// We'll resume here when the call returns.

	// Restore locked state.
	if lockedm {
		mp := gp.m
		mp.lockedExt = lockedExt
		mp.lockedInt--
		mp.lockedg.set(gp)
		gp.lockedm.set(mp)
	}

	gp.asyncSafePoint = false
}

// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine.
func debugCallWrap1(dispatch uintptr, callingG *g) {
	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)

	// Resume the caller goroutine.
	getg().schedlink.set(callingG)
	mcall(func(gp *g) {
		callingG := gp.schedlink.ptr()
		gp.schedlink = 0

		// Unlock this goroutine from the M if necessary. The
		// calling G will relock.
		if gp.lockedm != 0 {
			gp.lockedm = 0
			gp.m.lockedg = 0
		}

		// Switch back to the calling goroutine. At some point
		// the scheduler will schedule us again and we'll
		// finish exiting.
		if trace.enabled {
			traceGoSched()
		}
		casgstatus(gp, _Grunning, _Grunnable)
		dropg()
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)

		if trace.enabled {
			traceGoUnpark(callingG, 0)
		}
		casgstatus(callingG, _Gwaiting, _Grunnable)
		execute(callingG, true)
	})
}

// debugCallWrap2 invokes the call-dispatching function at PC dispatch
// and reports any panic it raises to the debugger via
// debugCallPanicked instead of letting it propagate.
func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
	// Build a func() value whose code pointer is dispatch by
	// aliasing a funcval through unsafe.Pointer.
	var dispatchF func()
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

	// ok distinguishes a normal return from a panic: it is only
	// set after dispatchF returns, so the deferred function calls
	// recover (and reports the value) exactly when a panic unwound
	// past the call.
	var ok bool
	defer func() {
		if !ok {
			err := recover()
			debugCallPanicked(err)
		}
	}()
	dispatchF()
	ok = true
}