github.com/puzpuzpuz/xsync/v2@v2.5.2-0.20231021165734-92b8269e19a9/rbmutex.go

package xsync

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// nslowdown is the slow-down guard: after a writer revokes reader bias,
// bias stays disabled for nslowdown times the measured reader drain time.
const nslowdown = 7

// rtokenPool recycles reader tokens across RLock/RUnlock calls.
var rtokenPool sync.Pool

// RToken is a reader lock token.
type RToken struct {
	slot uint32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}

// An RBMutex is a reader-biased reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers or a single
// writer. The zero value for an RBMutex is an unlocked mutex.
//
// An RBMutex must not be copied after first use.
//
// RBMutex is based on a modified version of the BRAVO
// (Biased Locking for Reader-Writer Locks) algorithm:
// https://arxiv.org/pdf/1810.01553.pdf
//
// RBMutex is a specialized mutex for scenarios, such as caches,
// where the vast majority of locks are acquired by readers and write
// lock acquire attempts are infrequent. In such scenarios, RBMutex
// performs better than sync.RWMutex on large multicore machines.
//
// RBMutex extends sync.RWMutex internally and uses it as the "reader
// bias disabled" fallback, so the same semantics apply. The only
// noticeable difference is the reader tokens returned by RLock and
// consumed by RUnlock.
type RBMutex struct {
	rslots       []rslot
	rmask        uint32
	rbias        int32
	inhibitUntil time.Time
	rw           sync.RWMutex
}

// rslot is a cache-line-padded per-slot reader counter.
type rslot struct {
	mu int32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}

// NewRBMutex creates a new RBMutex instance.
func NewRBMutex() *RBMutex {
	nslots := nextPowOf2(parallelism())
	mu := RBMutex{
		rslots: make([]rslot, nslots),
		rmask:  nslots - 1,
		rbias:  1,
	}
	return &mu
}
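
// exampleRBMutexUsage is an illustrative sketch, not part of the public
// API: it shows the token flow that RLock/RUnlock expect, guarding a
// hypothetical map. Writers use Lock/Unlock exactly as with sync.RWMutex.
func exampleRBMutexUsage() {
	cache := map[string]string{"k": "v"}
	mu := NewRBMutex()

	// Reader path: keep the token and hand it back to RUnlock.
	t := mu.RLock()
	_ = cache["k"]
	mu.RUnlock(t)

	// Writer path: revokes reader bias until all fast-path readers drain.
	mu.Lock()
	cache["k"] = "v2"
	mu.Unlock()
}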

// RLock locks mu for reading and returns a reader token. The
// token must be passed to the corresponding RUnlock call.
//
// Should not be used for recursive read locking; a blocked Lock
// call excludes new readers from acquiring the lock.
func (mu *RBMutex) RLock() *RToken {
	if atomic.LoadInt32(&mu.rbias) == 1 {
		t, ok := rtokenPool.Get().(*RToken)
		if !ok {
			t = new(RToken)
			t.slot = fastrand()
		}
		// Try all available slots to distribute readers across them.
		for i := 0; i < len(mu.rslots); i++ {
			slot := t.slot + uint32(i)
			rslot := &mu.rslots[slot&mu.rmask]
			rslotmu := atomic.LoadInt32(&rslot.mu)
			if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) {
				if atomic.LoadInt32(&mu.rbias) == 1 {
					// Hot path succeeded.
					t.slot = slot
					return t
				}
				// The mutex is no longer reader biased. Go to the slow path.
				atomic.AddInt32(&rslot.mu, -1)
				rtokenPool.Put(t)
				break
			}
			// Contention detected. Try the next slot.
		}
	}
	// Slow path.
	mu.rw.RLock()
	if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
		atomic.StoreInt32(&mu.rbias, 1)
	}
	return nil
}
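
// readWithToken is an illustrative sketch, not part of the public API: it
// pairs RLock with a deferred RUnlock so that both the fast path (non-nil
// token) and the slow path (nil token) are released uniformly. The read
// callback is hypothetical.
func readWithToken(mu *RBMutex, read func()) {
	t := mu.RLock()
	defer mu.RUnlock(t)
	read()
}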

// RUnlock undoes a single RLock call. The reader token obtained from
// that RLock call must be provided, including a nil token from the
// slow path. RUnlock does not affect other simultaneous readers. A
// panic is raised if mu is not locked for reading on entry to RUnlock.
func (mu *RBMutex) RUnlock(t *RToken) {
	if t == nil {
		// The reader took the slow path and holds the fallback read lock.
		mu.rw.RUnlock()
		return
	}
	if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 {
		panic("invalid reader state detected")
	}
	rtokenPool.Put(t)
}

// Lock locks mu for writing. If the lock is already locked for
// reading or writing, Lock blocks until the lock is available.
func (mu *RBMutex) Lock() {
	mu.rw.Lock()
	if atomic.LoadInt32(&mu.rbias) == 1 {
		atomic.StoreInt32(&mu.rbias, 0)
		start := time.Now()
		// Wait for all fast-path readers to drain from their slots.
		for i := 0; i < len(mu.rslots); i++ {
			for atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
				runtime.Gosched()
			}
		}
		// Inhibit reader bias for nslowdown times the drain time.
		mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown)
	}
}
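
// For illustration (numbers are hypothetical): if draining readers above
// took 100µs, inhibitUntil lands 700µs in the future, so write-heavy
// phases keep the mutex in plain sync.RWMutex mode rather than repeatedly
// paying the drain cost on each Lock.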

// Unlock unlocks mu for writing. A panic is raised if mu is not locked
// for writing on entry to Unlock.
//
// As with RWMutex, a locked RBMutex is not associated with a
// particular goroutine. One goroutine may RLock (Lock) an RBMutex and
// then arrange for another goroutine to RUnlock (Unlock) it.
func (mu *RBMutex) Unlock() {
	mu.rw.Unlock()
}