inet.af/netstack@v0.0.0-20220214151720-7585b01ddccf/refs/refcounter.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package refs defines an interface for reference counted objects. It
// also provides a drop-in implementation called AtomicRefCount.
package refs

import (
	"bytes"
	"fmt"
	"reflect"
	"runtime"
	"sync/atomic"

	"inet.af/netstack/context"
	"inet.af/netstack/log"
	"inet.af/netstack/sync"
)

// RefCounter is the interface to be implemented by objects that are reference
// counted.
//
// TODO(gvisor.dev/issue/1624): Get rid of most of this package and replace it
// with refsvfs2.
type RefCounter interface {
	// IncRef increments the reference counter on the object.
	IncRef()

	// DecRef decrements the reference counter on the object.
	//
	// Note that AtomicRefCount.DecRef() does not support destructors.
	// If a type has a destructor, it must implement its own DecRef()
	// method and call AtomicRefCount.DecRefWithDestructor(destructor).
	DecRef(ctx context.Context)

	// TryIncRef attempts to increase the reference counter on the object,
	// but may fail if all references have already been dropped. This
	// should be used only in special circumstances, such as WeakRefs.
	TryIncRef() bool

	// addWeakRef adds the given weak reference. Note that you should have
	// a reference to the object when calling this method.
	addWeakRef(*WeakRef)

	// dropWeakRef drops the given weak reference. Note that you should
	// have a reference to the object when calling this method.
	dropWeakRef(*WeakRef)
}

// A WeakRefUser is notified when the last non-weak reference is dropped.
type WeakRefUser interface {
	// WeakRefGone is called when the last non-weak reference is dropped.
	WeakRefGone(ctx context.Context)
}

// WeakRef is a weak reference.
//
// +stateify savable
type WeakRef struct {
	weakRefEntry `state:"nosave"`

	// obj is an atomic value that points to the refCounter.
	obj atomic.Value `state:".(savedReference)"`

	// user is notified when the weak ref is zapped by the object getting
	// destroyed.
	user WeakRefUser
}

// weakRefPool is a pool of weak references to avoid allocations on the hot
// path.
var weakRefPool = sync.Pool{
	New: func() interface{} {
		return &WeakRef{}
	},
}

// NewWeakRef acquires a weak reference for the given object.
//
// An optional user will be notified when the last non-weak reference is
// dropped.
//
// Note that you must hold a reference to the object prior to getting a weak
// reference. (But you may drop the non-weak reference after that.)
func NewWeakRef(rc RefCounter, u WeakRefUser) *WeakRef {
	w := weakRefPool.Get().(*WeakRef)
	w.init(rc, u)
	return w
}
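// The following sketch is editorial and not part of the original file: it
// illustrates the intended WeakRef lifecycle, assuming a hypothetical
// exampleObject type that embeds AtomicRefCount (and therefore satisfies
// RefCounter).
type exampleObject struct {
	AtomicRefCount
}

// exampleWeakRefUsage (hypothetical) holds a real reference, takes a weak
// reference, upgrades it, and releases everything in order.
func exampleWeakRefUsage(ctx context.Context) {
	obj := &exampleObject{}   // zero value holds one real reference (count is offset by 1)
	w := NewWeakRef(obj, nil) // legal: we still hold a real reference

	if rc := w.Get(); rc != nil {
		// Get succeeded, so we now hold a second real reference.
		rc.DecRef(ctx)
	}

	w.Drop(ctx)     // finished with the weak reference
	obj.DecRef(ctx) // drop the last real reference; the destructor path runs
}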
// get attempts to get a normal reference to the underlying object, and
// returns the object. If this weak reference has already been zapped (the
// object has been destroyed) then false is returned. If the object still
// exists, then true is returned.
func (w *WeakRef) get() (RefCounter, bool) {
	rc := w.obj.Load().(RefCounter)
	if v := reflect.ValueOf(rc); v == reflect.Zero(v.Type()) {
		// This pointer has already been zapped by zap() below. We do
		// this to ensure that the GC can collect the underlying
		// RefCounter objects and they don't hog resources.
		return nil, false
	}
	if !rc.TryIncRef() {
		return nil, true
	}
	return rc, true
}

// Get attempts to get a normal reference to the underlying object, and
// returns the object. If this fails (the object no longer exists), then nil
// will be returned instead.
func (w *WeakRef) Get() RefCounter {
	rc, _ := w.get()
	return rc
}

// Drop drops this weak reference. You should always call Drop when you are
// finished with the weak reference. You may not use this object after calling
// Drop.
func (w *WeakRef) Drop(ctx context.Context) {
	rc, ok := w.get()
	if !ok {
		// We've been zapped already. When the refcounter has called
		// zap, we're guaranteed it's not holding references.
		weakRefPool.Put(w)
		return
	}
	if rc == nil {
		// The object is in the process of being destroyed. We can't
		// remove this from the object's list, nor can we return this
		// object to the pool. It'll just be garbage collected. This is
		// a rare edge case, so it's not a big deal.
		return
	}

	// At this point, we have a reference on the object. So destruction
	// of the object (and zapping this weak reference) can't race here.
	rc.dropWeakRef(w)

	// And now we aren't on the object's list of weak references. So it
	// won't zap us if this causes the reference count to drop to zero.
	rc.DecRef(ctx)

	// Return to the pool.
	weakRefPool.Put(w)
}

// init initializes this weak reference.
func (w *WeakRef) init(rc RefCounter, u WeakRefUser) {
	// Reset the contents of the weak reference.
	// This is important because we are resetting the atomic value type.
	// Otherwise, we could panic here if obj is different than what it was
	// the last time this was used.
	*w = WeakRef{}
	w.user = u
	w.obj.Store(rc)

	// In the load path, we may already have a nil value. So we need to
	// check whether or not that is the case before calling addWeakRef.
	if v := reflect.ValueOf(rc); v != reflect.Zero(v.Type()) {
		rc.addWeakRef(w)
	}
}

// zap zaps this weak reference.
func (w *WeakRef) zap() {
	// We need to be careful about types here.
	// So reflect is involved. But it's not that bad.
	rc := w.obj.Load()
	typ := reflect.TypeOf(rc)
	w.obj.Store(reflect.Zero(typ).Interface())
}
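// Editorial sketch, not part of the original file: get and zap above avoid a
// plain nil comparison because a typed nil pointer stored in an interface
// makes the interface itself non-nil. exampleTypedNil (hypothetical)
// demonstrates the distinction using the same reflect-based check.
func exampleTypedNil() {
	var p *exampleObject  // nil pointer
	var rc RefCounter = p // interface holding a typed nil

	fmt.Println(rc == nil) // false: the interface still carries a type

	// The zero-value comparison used by get() above catches this case.
	v := reflect.ValueOf(rc)
	fmt.Println(v == reflect.Zero(v.Type())) // true
}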
// AtomicRefCount keeps a reference count using atomic operations and calls
// the destructor when the count reaches zero.
//
// Do not use AtomicRefCount for new ref-counted objects! It is deprecated in
// favor of the refsvfs2 package.
//
// N.B. To allow the zero-object to be initialized, the count is offset by
// 1, that is, when refCount is n, there are really n+1 references.
//
// +stateify savable
type AtomicRefCount struct {
	// refCount is composed of two fields:
	//
	//	[32-bit speculative references]:[32-bit real references]
	//
	// Speculative references are used for TryIncRef, to avoid a
	// CompareAndSwap loop. See IncRef, DecRef and TryIncRef for details of
	// how these fields are used.
	refCount int64

	// name is the name of the type which owns this ref count.
	//
	// name is immutable after EnableLeakCheck is called.
	name string

	// stack optionally records the caller of EnableLeakCheck.
	//
	// stack is immutable after EnableLeakCheck is called.
	stack []uintptr

	// mu protects the list below.
	mu sync.Mutex `state:"nosave"`

	// weakRefs is our collection of weak references.
	weakRefs weakRefList `state:"nosave"`
}

// LeakMode configures the leak checker.
type LeakMode uint32

// TODO(gvisor.dev/issue/1624): Simplify down to two modes (on/off) once vfs1
// ref counting is gone.
const (
	// UninitializedLeakChecking indicates that the leak checker has not
	// yet been initialized.
	UninitializedLeakChecking LeakMode = iota

	// NoLeakChecking indicates that no effort should be made to check for
	// leaks.
	NoLeakChecking

	// LeaksLogWarning indicates that a warning should be logged when leaks
	// are found.
	LeaksLogWarning

	// LeaksLogTraces indicates that a trace collected during allocation
	// should be logged when leaks are found.
	LeaksLogTraces
)

// Set implements flag.Value.
func (l *LeakMode) Set(v string) error {
	switch v {
	case "disabled":
		*l = NoLeakChecking
	case "log-names":
		*l = LeaksLogWarning
	case "log-traces":
		*l = LeaksLogTraces
	default:
		return fmt.Errorf("invalid ref leak mode %q", v)
	}
	return nil
}

// Get implements flag.Getter.
func (l *LeakMode) Get() interface{} {
	return *l
}

// String implements flag.Value.
func (l LeakMode) String() string {
	switch l {
	case UninitializedLeakChecking:
		return "uninitialized"
	case NoLeakChecking:
		return "disabled"
	case LeaksLogWarning:
		return "log-names"
	case LeaksLogTraces:
		return "log-traces"
	}
	panic(fmt.Sprintf("invalid ref leak mode %d", l))
}

// leakMode stores the current mode for the reference leak checker.
//
// Values must be one of the LeakMode values.
//
// leakMode must be accessed atomically.
var leakMode uint32

// SetLeakMode configures the reference leak checker.
func SetLeakMode(mode LeakMode) {
	atomic.StoreUint32(&leakMode, uint32(mode))
}

// GetLeakMode returns the current leak mode.
func GetLeakMode() LeakMode {
	return LeakMode(atomic.LoadUint32(&leakMode))
}

const maxStackFrames = 40

type fileLine struct {
	file string
	line int
}
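// Editorial sketch, not part of the original file: because *LeakMode
// implements flag.Value (Set/String) and flag.Getter (Get), it can be
// registered directly with flag.Var. exampleConfigureLeakChecker
// (hypothetical) drives Set by hand to avoid importing the flag package here.
func exampleConfigureLeakChecker() {
	var mode LeakMode
	if err := mode.Set("log-traces"); err != nil {
		// Only "disabled", "log-names" and "log-traces" are accepted.
		panic(err)
	}
	SetLeakMode(mode) // published atomically; read back via GetLeakMode
}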
// A stackKey is a representation of a stack frame for use as a map key.
//
// The fileLine type is used as PC values seem to vary across collections,
// even for the same call stack.
type stackKey [maxStackFrames]fileLine

var stackCache = struct {
	sync.Mutex
	entries map[stackKey][]uintptr
}{entries: map[stackKey][]uintptr{}}

func makeStackKey(pcs []uintptr) stackKey {
	frames := runtime.CallersFrames(pcs)
	var key stackKey
	keySlice := key[:0]
	for {
		frame, more := frames.Next()
		keySlice = append(keySlice, fileLine{frame.File, frame.Line})

		if !more || len(keySlice) == len(key) {
			break
		}
	}
	return key
}

// RecordStack constructs and returns the PCs on the current stack.
func RecordStack() []uintptr {
	pcs := make([]uintptr, maxStackFrames)
	n := runtime.Callers(1, pcs)
	if n == 0 {
		// No pcs available. Stop now.
		//
		// This can happen if the first argument to runtime.Callers
		// is large.
		return nil
	}
	pcs = pcs[:n]
	key := makeStackKey(pcs)
	stackCache.Lock()
	v, ok := stackCache.entries[key]
	if !ok {
		// Reallocate to prevent pcs from escaping.
		v = append([]uintptr(nil), pcs...)
		stackCache.entries[key] = v
	}
	stackCache.Unlock()
	return v
}

// FormatStack converts the given stack into a readable format.
func FormatStack(pcs []uintptr) string {
	frames := runtime.CallersFrames(pcs)
	var trace bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&trace, "%s:%d: %s\n", frame.File, frame.Line, frame.Function)

		if !more {
			break
		}
	}
	return trace.String()
}

func (r *AtomicRefCount) finalize() {
	var note string
	switch LeakMode(atomic.LoadUint32(&leakMode)) {
	case NoLeakChecking:
		return
	case UninitializedLeakChecking:
		note = "(Leak checker uninitialized): "
	}
	if n := r.ReadRefs(); n != 0 {
		msg := fmt.Sprintf("%sAtomicRefCount %p owned by %q garbage collected with ref count of %d (want 0)", note, r, r.name, n)
		if len(r.stack) != 0 {
			msg += ":\nCaller:\n" + FormatStack(r.stack)
		} else {
			msg += " (enable trace logging to debug)"
		}
		log.Warningf(msg)
	}
}

// EnableLeakCheck checks for reference leaks when the AtomicRefCount gets
// garbage collected.
//
// This function adds a finalizer to the AtomicRefCount, so the AtomicRefCount
// must be at the beginning of its parent.
//
// name is a friendly name that will be listed as the owner of the
// AtomicRefCount in logs. It should be the name of the parent type, including
// package.
func (r *AtomicRefCount) EnableLeakCheck(name string) {
	if name == "" {
		panic("invalid name")
	}
	switch LeakMode(atomic.LoadUint32(&leakMode)) {
	case NoLeakChecking:
		return
	case LeaksLogTraces:
		r.stack = RecordStack()
	}
	r.name = name
	runtime.SetFinalizer(r, (*AtomicRefCount).finalize)
}

// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *AtomicRefCount) ReadRefs() int64 {
	// Account for the internal -1 offset on refcounts.
	return atomic.LoadInt64(&r.refCount) + 1
}
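// Editorial sketch, not part of the original file: a worked example of the
// packed-counter arithmetic used by TryIncRef below. The high 32 bits of
// refCount hold speculative references and the low 32 bits hold real ones
// (still offset by 1), so a single AddInt64 can take, abandon, or convert a
// speculative reference without a CompareAndSwap loop.
func exampleSpeculativeArithmetic() {
	const speculativeRef = 1 << 32
	var refCount int64 // zero value: one real reference outstanding

	// Take a speculative reference. The low 32 bits are unchanged, so
	// int32(v) still reflects only real references.
	v := atomic.AddInt64(&refCount, speculativeRef)
	if int32(v) < 0 {
		// The real count already went to -1: the object was
		// destroyed, so abandon the speculative reference.
		atomic.AddInt64(&refCount, -speculativeRef)
		return
	}

	// Convert the speculative reference into a real one in one step.
	atomic.AddInt64(&refCount, -speculativeRef+1)
}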
// IncRef increments this object's reference count. While the count is kept
// greater than zero, the destructor doesn't get called.
//
// The sanity check here is limited to real references, since if they have
// dropped beneath zero then the object should have been destroyed.
//
//go:nosplit
func (r *AtomicRefCount) IncRef() {
	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
		panic("Incrementing non-positive ref count")
	}
}

// TryIncRef attempts to increment the reference count, *unless the count has
// already reached zero*. If false is returned, then the object has already
// been destroyed, and the weak reference is no longer valid. If true is
// returned, then a valid reference is now held on the object.
//
// To do this safely without a loop, a speculative reference is first acquired
// on the object. This allows multiple concurrent TryIncRef calls to
// distinguish other TryIncRef calls from genuine references held.
//
//go:nosplit
func (r *AtomicRefCount) TryIncRef() bool {
	const speculativeRef = 1 << 32
	v := atomic.AddInt64(&r.refCount, speculativeRef)
	if int32(v) < 0 {
		// This object has already been freed.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}

	// Turn into a real reference.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

// addWeakRef adds the given weak reference.
func (r *AtomicRefCount) addWeakRef(w *WeakRef) {
	r.mu.Lock()
	r.weakRefs.PushBack(w)
	r.mu.Unlock()
}

// dropWeakRef drops the given weak reference.
func (r *AtomicRefCount) dropWeakRef(w *WeakRef) {
	r.mu.Lock()
	r.weakRefs.Remove(w)
	r.mu.Unlock()
}

// DecRefWithDestructor decrements the object's reference count. If the
// resulting count is negative and the destructor is not nil, then the
// destructor will be called.
//
// Note that speculative references are counted here. Since they were added
// prior to real references reaching zero, they will successfully convert to
// real references. In other words, we see speculative references only in the
// following case:
//
//	A: TryIncRef [speculative increase => sees non-negative references]
//	B: DecRef [real decrease]
//	A: TryIncRef [transform speculative to real]
//
//go:nosplit
func (r *AtomicRefCount) DecRefWithDestructor(ctx context.Context, destroy func(context.Context)) {
	switch v := atomic.AddInt64(&r.refCount, -1); {
	case v < -1:
		panic("Decrementing non-positive ref count")

	case v == -1:
		// Zap weak references. Note that at this point, all weak
		// references are already invalid. That is, TryIncRef() will
		// return false due to the reference count check.
		r.mu.Lock()
		for !r.weakRefs.Empty() {
			w := r.weakRefs.Front()
			// Capture the callback because w cannot be touched
			// after it's zapped -- the owner is free to reuse it
			// after that.
			user := w.user
			r.weakRefs.Remove(w)
			w.zap()

			if user != nil {
				r.mu.Unlock()
				user.WeakRefGone(ctx)
				r.mu.Lock()
			}
		}
		r.mu.Unlock()

		// Call the destructor.
		if destroy != nil {
			destroy(ctx)
		}
	}
}

// DecRef decrements this object's reference count.
//
//go:nosplit
func (r *AtomicRefCount) DecRef(ctx context.Context) {
	r.DecRefWithDestructor(ctx, nil)
}
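// Editorial sketch, not part of the original file: as the RefCounter
// interface notes, a type with a destructor implements its own DecRef and
// delegates to DecRefWithDestructor. exampleResource (hypothetical) shows
// the pattern.
type exampleResource struct {
	AtomicRefCount
}

// DecRef overrides the embedded DecRef so that destroy runs when the last
// reference is dropped.
func (r *exampleResource) DecRef(ctx context.Context) {
	r.AtomicRefCount.DecRefWithDestructor(ctx, r.destroy)
}

// destroy releases resources owned by r; it is called exactly once, after
// the final DecRef.
func (r *exampleResource) destroy(context.Context) {
	// Release owned resources here (close fds, unpin pages, etc.).
}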
// OnExit is called on sandbox exit. It runs GC to enqueue refcount
// finalizers, which check for reference leaks. There is no way to guarantee
// that every finalizer will run before exiting, but this at least ensures
// that they will be discovered/enqueued by GC.
func OnExit() {
	if LeakMode(atomic.LoadUint32(&leakMode)) != NoLeakChecking {
		runtime.GC()
	}
}
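// Editorial sketch, not part of the original file: the EnableLeakCheck
// pattern above requires the AtomicRefCount to be the first field of its
// parent, because the finalizer is set on the embedded count, whose address
// must then match the start of the enclosing allocation. exampleTracked
// (hypothetical) shows a typical constructor.
type exampleTracked struct {
	AtomicRefCount // must be first for EnableLeakCheck/SetFinalizer

	// ... other fields would follow here.
}

func newExampleTracked() *exampleTracked {
	t := &exampleTracked{}
	// Use the package-qualified parent type name so leak logs identify
	// the owner.
	t.EnableLeakCheck("refs.exampleTracked")
	return t
}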