inet.af/netstack@v0.0.0-20220214151720-7585b01ddccf/tcpip/stack/packet_buffer_refs.go

package stack

import (
	"fmt"
	"sync/atomic"

	"inet.af/netstack/refsvfs2"
)

// packetBufferenableLogging indicates whether reference-related events should
// be logged (with stack traces). This is false by default and should only be
// set to true for debugging purposes, as it can generate an extremely large
// amount of output and drastically degrade performance.
const packetBufferenableLogging = false

// packetBufferobj is used to customize logging. Note that we use a pointer to
// T so that we do not copy the entire object when passed as a format
// parameter.
var packetBufferobj *PacketBuffer

// packetBufferRefs implements refs.RefCounter. It keeps a reference count
// using atomic operations and calls the destructor when the count reaches
// zero.
//
// NOTE: Do not introduce additional fields to the Refs struct. It is used by
// many filesystem objects, and we want to keep it as small as possible (i.e.,
// the same size as using an int64 directly) to avoid taking up extra cache
// space. In general, this template should not be extended at the cost of
// performance. If it does not offer enough flexibility for a particular object
// (example: b/187877947), we should implement the RefCounter/CheckedObject
// interfaces manually.
//
// +stateify savable
type packetBufferRefs struct {
	// refCount is composed of two fields:
	//
	//	[32-bit speculative references]:[32-bit real references]
	//
	// Speculative references are used for TryIncRef, to avoid a CompareAndSwap
	// loop. See IncRef, DecRef and TryIncRef for details of how these fields are
	// used.
	refCount int64
}
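
// packetBufferRefsSplit is an illustrative sketch, not part of the generated
// refs template: it decomposes the packed refCount word into its two 32-bit
// halves. TryIncRef depends on int32(v) recovering the low (real) half, while
// the speculative half occupies the high 32 bits.
func packetBufferRefsSplit(v int64) (speculativeRefs, realRefs int32) {
	return int32(v >> 32), int32(v)
}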

// InitRefs initializes r with one reference and, if enabled, activates leak
// checking.
func (r *packetBufferRefs) InitRefs() {
	atomic.StoreInt64(&r.refCount, 1)
	refsvfs2.Register(r)
}

// RefType implements refsvfs2.CheckedObject.RefType.
func (r *packetBufferRefs) RefType() string {
	return fmt.Sprintf("%T", packetBufferobj)[1:]
}

// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
func (r *packetBufferRefs) LeakMessage() string {
	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
}

// LogRefs implements refsvfs2.CheckedObject.LogRefs.
func (r *packetBufferRefs) LogRefs() bool {
	return packetBufferenableLogging
}

// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *packetBufferRefs) ReadRefs() int64 {
	return atomic.LoadInt64(&r.refCount)
}

// IncRef implements refs.RefCounter.IncRef.
//
//go:nosplit
func (r *packetBufferRefs) IncRef() {
	v := atomic.AddInt64(&r.refCount, 1)
	if packetBufferenableLogging {
		refsvfs2.LogIncRef(r, v)
	}
	if v <= 1 {
		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
	}
}

// TryIncRef implements refs.TryRefCounter.TryIncRef.
//
// To do this safely without a loop, a speculative reference is first acquired
// on the object. This allows multiple concurrent TryIncRef calls to distinguish
// other TryIncRef calls from genuine references held.
//
//go:nosplit
func (r *packetBufferRefs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		// The real count is already zero: back out the speculative reference.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}

	// The object is still live: convert the speculative reference into a real one.
	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
	if packetBufferenableLogging {
		refsvfs2.LogTryIncRef(r, v)
	}
	return true
}
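
// tryUsePacketBufferRefs is a minimal usage sketch, not part of the generated
// refs template: the function name and its use/destroy parameters are
// hypothetical stand-ins for caller code. It shows the intended TryIncRef
// contract: take a real reference only if the object has not already reached
// zero references, and pair it with a matching DecRef.
func tryUsePacketBufferRefs(r *packetBufferRefs, use, destroy func()) bool {
	if !r.TryIncRef() {
		// No reference was acquired; the object is dead or being destroyed.
		return false
	}
	// A real reference is held until the matching DecRef. If this turns out to
	// be the last reference, DecRef invokes destroy.
	use()
	r.DecRef(destroy)
	return true
}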

// DecRef implements refs.RefCounter.DecRef.
//
// Note that speculative references are counted here. Since they were added
// prior to real references reaching zero, they will successfully convert to
// real references. In other words, we see speculative references only in the
// following case:
//
//	A: TryIncRef [speculative increase => sees non-negative references]
//	B: DecRef [real decrease]
//	A: TryIncRef [transform speculative to real]
//
//go:nosplit
func (r *packetBufferRefs) DecRef(destroy func()) {
	v := atomic.AddInt64(&r.refCount, -1)
	if packetBufferenableLogging {
		refsvfs2.LogDecRef(r, v)
	}
	switch {
	case v < 0:
		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))

	case v == 0:
		refsvfs2.Unregister(r)

		if destroy != nil {
			destroy()
		}
	}
}
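
// examplePacketBufferRefsLifecycle is an illustrative sketch, not part of the
// generated refs template: the cleanup parameter stands in for whatever
// destructor the owning PacketBuffer passes to DecRef. It walks through a
// typical reference lifecycle end to end.
func examplePacketBufferRefsLifecycle(cleanup func()) {
	var r packetBufferRefs
	r.InitRefs()      // count = 1; registers for leak checking when enabled.
	r.IncRef()        // count = 2.
	r.DecRef(nil)     // count = 1; the destructor is not invoked.
	r.DecRef(cleanup) // count = 0; cleanup (if non-nil) runs exactly once.
}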

// afterLoad is invoked by stateify after the object has been restored from a
// saved state; it re-registers live objects with the leak checker.
func (r *packetBufferRefs) afterLoad() {
	if r.ReadRefs() > 0 {
		refsvfs2.Register(r)
	}
}