github.com/metacubex/gvisor@v0.0.0-20240320004321-933faba989ec/pkg/sentry/socket/unix/transport/queue_refs.go (about)

     1  package transport
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  
     7  	"github.com/metacubex/gvisor/pkg/atomicbitops"
     8  	"github.com/metacubex/gvisor/pkg/refs"
     9  )
    10  
// queueenableLogging indicates whether reference-related events should be
// logged (with stack traces). This is false by default and should only be set
// to true for debugging purposes, as it can generate an extremely large amount
// of output and drastically degrade performance.
const queueenableLogging = false
    16  
// queueobj is used only to customize logging output: RefType formats this
// typed-nil pointer with %T to recover the element type name. Note that we use
// a pointer to queue so that we do not copy the entire object when passed as a
// format parameter.
var queueobj *queue
    20  
    21  // Refs implements refs.RefCounter. It keeps a reference count using atomic
    22  // operations and calls the destructor when the count reaches zero.
    23  //
    24  // NOTE: Do not introduce additional fields to the Refs struct. It is used by
    25  // many filesystem objects, and we want to keep it as small as possible (i.e.,
    26  // the same size as using an int64 directly) to avoid taking up extra cache
    27  // space. In general, this template should not be extended at the cost of
    28  // performance. If it does not offer enough flexibility for a particular object
    29  // (example: b/187877947), we should implement the RefCounter/CheckedObject
    30  // interfaces manually.
    31  //
    32  // +stateify savable
type queueRefs struct {
	// refCount is composed of two fields:
	//
	//	[32-bit speculative references]:[32-bit real references]
	//
	// Speculative references are used for TryIncRef, to avoid a CompareAndSwap
	// loop. See IncRef, DecRef and TryIncRef for details of how these fields
	// are used. A single atomic Int64 keeps the struct the same size as a bare
	// int64 (see the NOTE above about not adding fields).
	refCount atomicbitops.Int64
}
    43  
// InitRefs initializes r with one reference and, if enabled, activates leak
// checking.
func (r *queueRefs) InitRefs() {
	// A racy (non-atomic-ordering) store is safe here: the object is still
	// being constructed and cannot be visible to other goroutines yet. The
	// count must be set before registering with the leak checker so a
	// concurrent leak report never observes a zero count.
	r.refCount.RacyStore(1)
	refs.Register(r)
}
    51  
    52  // RefType implements refs.CheckedObject.RefType.
    53  func (r *queueRefs) RefType() string {
    54  	return fmt.Sprintf("%T", queueobj)[1:]
    55  }
    56  
    57  // LeakMessage implements refs.CheckedObject.LeakMessage.
    58  func (r *queueRefs) LeakMessage() string {
    59  	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
    60  }
    61  
// LogRefs implements refs.CheckedObject.LogRefs. It reports whether
// per-operation reference logging is compiled in for this type (a
// compile-time constant, false by default).
func (r *queueRefs) LogRefs() bool {
	return queueenableLogging
}
    66  
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
// Note that the value may transiently include speculative references added
// by in-flight TryIncRef calls (in the upper 32 bits).
func (r *queueRefs) ReadRefs() int64 {
	return r.refCount.Load()
}
    72  
    73  // IncRef implements refs.RefCounter.IncRef.
    74  //
    75  //go:nosplit
    76  func (r *queueRefs) IncRef() {
    77  	v := r.refCount.Add(1)
    78  	if queueenableLogging {
    79  		refs.LogIncRef(r, v)
    80  	}
    81  	if v <= 1 {
    82  		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
    83  	}
    84  }
    85  
    86  // TryIncRef implements refs.TryRefCounter.TryIncRef.
    87  //
    88  // To do this safely without a loop, a speculative reference is first acquired
    89  // on the object. This allows multiple concurrent TryIncRef calls to distinguish
    90  // other TryIncRef calls from genuine references held.
    91  //
    92  //go:nosplit
    93  func (r *queueRefs) TryIncRef() bool {
    94  	const speculativeRef = 1 << 32
    95  	if v := r.refCount.Add(speculativeRef); int32(v) == 0 {
    96  
    97  		r.refCount.Add(-speculativeRef)
    98  		return false
    99  	}
   100  
   101  	v := r.refCount.Add(-speculativeRef + 1)
   102  	if queueenableLogging {
   103  		refs.LogTryIncRef(r, v)
   104  	}
   105  	return true
   106  }
   107  
   108  // DecRef implements refs.RefCounter.DecRef.
   109  //
   110  // Note that speculative references are counted here. Since they were added
   111  // prior to real references reaching zero, they will successfully convert to
   112  // real references. In other words, we see speculative references only in the
   113  // following case:
   114  //
   115  //	A: TryIncRef [speculative increase => sees non-negative references]
   116  //	B: DecRef [real decrease]
   117  //	A: TryIncRef [transform speculative to real]
   118  //
   119  //go:nosplit
   120  func (r *queueRefs) DecRef(destroy func()) {
   121  	v := r.refCount.Add(-1)
   122  	if queueenableLogging {
   123  		refs.LogDecRef(r, v)
   124  	}
   125  	switch {
   126  	case v < 0:
   127  		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
   128  
   129  	case v == 0:
   130  		refs.Unregister(r)
   131  
   132  		if destroy != nil {
   133  			destroy()
   134  		}
   135  	}
   136  }
   137  
// afterLoad is invoked by stateify after restoring r from a saved state. It
// re-registers the object with the leak checker, but only if it still holds
// references (a zero count means the object was already destroyed before
// save).
func (r *queueRefs) afterLoad(context.Context) {
	if r.ReadRefs() > 0 {
		refs.Register(r)
	}
}