github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/mgcstack.go (about) 1 // Copyright 2018 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Garbage collector: stack objects and stack tracing 6 // See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing 7 // Also see issue 22350. 8 9 // Stack tracing solves the problem of determining which parts of the 10 // stack are live and should be scanned. It runs as part of scanning 11 // a single goroutine stack. 12 // 13 // Normally determining which parts of the stack are live is easy to 14 // do statically, as user code has explicit references (reads and 15 // writes) to stack variables. The compiler can do a simple dataflow 16 // analysis to determine liveness of stack variables at every point in 17 // the code. See cmd/compile/internal/gc/plive.go for that analysis. 18 // 19 // However, when we take the address of a stack variable, determining 20 // whether that variable is still live is less clear. We can still 21 // look for static accesses, but accesses through a pointer to the 22 // variable are difficult in general to track statically. That pointer 23 // can be passed among functions on the stack, conditionally retained, 24 // etc. 25 // 26 // Instead, we will track pointers to stack variables dynamically. 27 // All pointers to stack-allocated variables will themselves be on the 28 // stack somewhere (or in associated locations, like defer records), so 29 // we can find them all efficiently. 30 // 31 // Stack tracing is organized as a mini garbage collection tracing 32 // pass. The objects in this garbage collection are all the variables 33 // on the stack whose address is taken, and which themselves contain a 34 // pointer. We call these variables "stack objects". 
35 // 36 // We begin by determining all the stack objects on the stack and all 37 // the statically live pointers that may point into the stack. We then 38 // process each pointer to see if it points to a stack object. If it 39 // does, we scan that stack object. It may contain pointers into the 40 // heap, in which case those pointers are passed to the main garbage 41 // collection. It may also contain pointers into the stack, in which 42 // case we add them to our set of stack pointers. 43 // 44 // Once we're done processing all the pointers (including the ones we 45 // added during processing), we've found all the stack objects that 46 // are live. Any dead stack objects are not scanned and their contents 47 // will not keep heap objects live. Unlike the main garbage 48 // collection, we can't sweep the dead stack objects; they live on in 49 // a moribund state until the stack frame that contains them is 50 // popped. 51 // 52 // A stack can look like this: 53 // 54 // +----------+ 55 // | foo() | 56 // | +------+ | 57 // | | A | | <---\ 58 // | +------+ | | 59 // | | | 60 // | +------+ | | 61 // | | B | | | 62 // | +------+ | | 63 // | | | 64 // +----------+ | 65 // | bar() | | 66 // | +------+ | | 67 // | | C | | <-\ | 68 // | +----|-+ | | | 69 // | | | | | 70 // | +----v-+ | | | 71 // | | D ---------/ 72 // | +------+ | | 73 // | | | 74 // +----------+ | 75 // | baz() | | 76 // | +------+ | | 77 // | | E -------/ 78 // | +------+ | 79 // | ^ | 80 // | F: --/ | 81 // | | 82 // +----------+ 83 // 84 // foo() calls bar() calls baz(). Each has a frame on the stack. 85 // foo() has stack objects A and B. 86 // bar() has stack objects C and D, with C pointing to D and D pointing to A. 87 // baz() has a stack object E pointing to C, and a local variable F pointing to E. 88 // 89 // Starting from the pointer in local variable F, we will eventually 90 // scan all of E, C, D, and A (in that order). B is never scanned 91 // because there is no live pointer to it. 
// If B is also statically
// dead (meaning that foo() never accesses B again after it calls
// bar()), then B's pointers into the heap are not considered live.

package runtime

import (
	"internal/goarch"
	"runtime/internal/sys"
	"unsafe"
)

const stackTraceDebug = false

// Buffer for pointers found during stack tracing.
// Must be smaller than or equal to workbuf, because these buffers are
// allocated from the workbuf pool (see the getempty casts below).
type stackWorkBuf struct {
	_ sys.NotInHeap
	stackWorkBufHdr
	obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}

// Header declaration must come after the buf declaration above, because of issue #14620.
type stackWorkBufHdr struct {
	_ sys.NotInHeap
	workbufhdr
	next *stackWorkBuf // linked list of workbufs
	// Note: we could theoretically repurpose lfnode.next as this next pointer.
	// It would save 1 word, but that probably isn't worth busting open
	// the lfnode API.
}

// Buffer for stack objects found on a goroutine stack.
// Must be smaller than or equal to workbuf (same pool as stackWorkBuf).
type stackObjectBuf struct {
	_ sys.NotInHeap
	stackObjectBufHdr
	obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
}

type stackObjectBufHdr struct {
	_ sys.NotInHeap
	workbufhdr
	next *stackObjectBuf // linked list of object buffers, in address order
}

// init sanity-checks at startup that both buffer types actually fit in
// a workbuf, since they are carved out of workbufs via getempty.
func init() {
	if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
		panic("stackWorkBuf too big")
	}
	if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
		panic("stackObjectBuf too big")
	}
}

// A stackObject represents a variable on the stack that has had
// its address taken.
type stackObject struct {
	_     sys.NotInHeap
	off   uint32             // offset above stack.lo
	size  uint32             // size of object
	r     *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned.
	left  *stackObject       // objects with lower addresses
	right *stackObject       // objects with higher addresses
}

// obj.r = r, but with no write barrier.
//
//go:nowritebarrier
func (obj *stackObject) setRecord(r *stackObjectRecord) {
	// Types of stack objects are always in read-only memory, not the heap.
	// So not using a write barrier is ok.
	*(*uintptr)(unsafe.Pointer(&obj.r)) = uintptr(unsafe.Pointer(r))
}

// A stackScanState keeps track of the state used during the GC walk
// of a goroutine.
type stackScanState struct {
	cache pcvalueCache

	// stack limits
	stack stack

	// conservative indicates that the next frame must be scanned conservatively.
	// This applies only to the innermost frame at an async safe-point.
	conservative bool

	// buf contains the set of possible pointers to stack objects.
	// Organized as a LIFO linked list of buffers.
	// All buffers except possibly the head buffer are full.
	buf     *stackWorkBuf
	freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis

	// cbuf contains conservative pointers to stack objects. If
	// all pointers to a stack object are obtained via
	// conservative scanning, then the stack object may be dead
	// and may contain dead pointers, so it must be scanned
	// defensively.
	cbuf *stackWorkBuf

	// list of stack objects
	// Objects are in increasing address order.
	head  *stackObjectBuf
	tail  *stackObjectBuf
	nobjs int

	// root of binary tree for fast object lookup by address
	// Initialized by buildIndex.
	root *stackObject
}

// Add p as a potential pointer to a stack object.
// p must be a stack address.
func (s *stackScanState) putPtr(p uintptr, conservative bool) {
	if p < s.stack.lo || p >= s.stack.hi {
		throw("address not a stack address")
	}
	// Conservative pointers go on s.cbuf, precise ones on s.buf;
	// getPtr drains s.buf first so precise pointers win.
	head := &s.buf
	if conservative {
		head = &s.cbuf
	}
	buf := *head
	if buf == nil {
		// Initial setup.
		buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
		buf.nobj = 0
		buf.next = nil
		*head = buf
	} else if buf.nobj == len(buf.obj) {
		// Head buffer is full: push a fresh buffer onto the LIFO list,
		// preferring the cached freeBuf over a new allocation.
		if s.freeBuf != nil {
			buf = s.freeBuf
			s.freeBuf = nil
		} else {
			buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
		}
		buf.nobj = 0
		buf.next = *head
		*head = buf
	}
	buf.obj[buf.nobj] = p
	buf.nobj++
}

// Remove and return a potential pointer to a stack object.
// Returns 0 if there are no more pointers available.
//
// This prefers non-conservative pointers so we scan stack objects
// precisely if there are any non-conservative pointers to them.
func (s *stackScanState) getPtr() (p uintptr, conservative bool) {
	// s.buf (precise) is tried before s.cbuf (conservative).
	for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} {
		buf := *head
		if buf == nil {
			// Never had any data.
			continue
		}
		if buf.nobj == 0 {
			if s.freeBuf != nil {
				// Free old freeBuf.
				putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
			}
			// Move buf to the freeBuf.
			s.freeBuf = buf
			buf = buf.next
			*head = buf
			if buf == nil {
				// No more data in this list.
				continue
			}
		}
		buf.nobj--
		// head's identity tells us which list this came from:
		// it is conservative iff we drained it from s.cbuf.
		return buf.obj[buf.nobj], head == &s.cbuf
	}
	// No more data in either list; release the spare buffer.
	if s.freeBuf != nil {
		putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
		s.freeBuf = nil
	}
	return 0, false
}

// addObject adds a stack object at addr described by record r to the set of stack objects.
func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord) {
	x := s.tail
	if x == nil {
		// initial setup
		x = (*stackObjectBuf)(unsafe.Pointer(getempty()))
		x.next = nil
		s.head = x
		s.tail = x
	}
	// Callers must add objects in increasing, non-overlapping address
	// order; buildIndex and the overlap check below rely on this.
	if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size {
		throw("objects added out of order or overlapping")
	}
	if x.nobj == len(x.obj) {
		// full buffer - allocate a new buffer, add to end of linked list
		y := (*stackObjectBuf)(unsafe.Pointer(getempty()))
		y.next = nil
		x.next = y
		s.tail = y
		x = y
	}
	obj := &x.obj[x.nobj]
	x.nobj++
	obj.off = uint32(addr - s.stack.lo) // offsets are relative to stack.lo
	obj.size = uint32(r.size)
	obj.setRecord(r)
	// obj.left and obj.right will be initialized by buildIndex before use.
	s.nobjs++
}

// buildIndex initializes s.root to a binary search tree.
// It should be called after all addObject calls but before
// any call of findObject.
func (s *stackScanState) buildIndex() {
	s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs)
}

// Build a binary search tree with the n objects in the list
// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ...
// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx].
// (The first object that was not included in the binary search tree.)
// If n == 0, returns nil, x.
//
// Because objects were added in address order and the median element
// becomes the root at each level, the resulting tree is balanced and
// ordered by object offset.
func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) {
	if n == 0 {
		return nil, x, idx
	}
	var left, right *stackObject
	// Build the left subtree from the first n/2 objects; the next
	// object becomes the root.
	left, x, idx = binarySearchTree(x, idx, n/2)
	root = &x.obj[idx]
	idx++
	if idx == len(x.obj) {
		// Advance to the next buffer in the list.
		x = x.next
		idx = 0
	}
	// Remaining n-n/2-1 objects form the right subtree.
	right, x, idx = binarySearchTree(x, idx, n-n/2-1)
	root.left = left
	root.right = right
	return root, x, idx
}

// findObject returns the stack object containing address a, if any.
332 // Must have called buildIndex previously. 333 func (s *stackScanState) findObject(a uintptr) *stackObject { 334 off := uint32(a - s.stack.lo) 335 obj := s.root 336 for { 337 if obj == nil { 338 return nil 339 } 340 if off < obj.off { 341 obj = obj.left 342 continue 343 } 344 if off >= obj.off+obj.size { 345 obj = obj.right 346 continue 347 } 348 return obj 349 } 350 }