github.com/dmmcquay/sia@v1.3.1-0.20180712220038-9f8d535311b9/sync/tryrwmutex.go

package sync

import (
	"runtime"
	"sync"
	"sync/atomic"
)

const (
	readOffset  = uint64(1)
	tryMask     = uint64(1099511627775) // equivalent to setting the first 40 bits to '1'.
	tryOffset   = uint64(1 << 40)
	writeOffset = uint64(1 << 20)
)
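
// The lock word is organized as follows, as implied by the constants above:
//
//	bits  0-19: number of readers waiting for or holding the mutex
//	bits 20-39: number of writers waiting for or holding the mutex
//	bits 40+  : contention from in-flight Try calls
//
// tryMask keeps only the reader and writer counts, and decrements are
// performed by adding the two's complement, ^(offset - 1).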

// TryRWMutex allows you to try to grab a RWMutex, failing if the mutex is
// unavailable. Standard blocking RLock and Lock calls are also available.
//
// Note that there will be inconsistencies if there are more than 1 << 20
// operations active at once.
type TryRWMutex struct {
	lock uint64
	mu   sync.RWMutex
}
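
// exampleBlockingUse is a minimal usage sketch (the guarded counter is a
// hypothetical example): the zero value of TryRWMutex is ready to use, and
// the blocking Lock/Unlock and RLock/RUnlock calls mirror sync.RWMutex.
func exampleBlockingUse() int {
	var tm TryRWMutex
	counter := 0

	// Exclusive write access.
	tm.Lock()
	counter++
	tm.Unlock()

	// Shared read access.
	tm.RLock()
	v := counter
	tm.RUnlock()
	return v
}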

// Lock blocks until the mutex is available, and then locks it.
func (tm *TryRWMutex) Lock() {
	// Signal that a write lock is waiting.
	v := atomic.AddUint64(&tm.lock, writeOffset)

	// Spin until there is no contention from a Try call.
	for v > tryOffset {
		runtime.Gosched()
		v = atomic.LoadUint64(&tm.lock)
	}

	// Grab the lock.
	tm.mu.Lock()
}

// RLock blocks until the mutex is available, then grabs a read lock.
func (tm *TryRWMutex) RLock() {
	// Signal that a read lock is waiting.
	v := atomic.AddUint64(&tm.lock, readOffset)

	// Spin until there is no contention from a Try call.
	for v > tryOffset {
		runtime.Gosched()
		v = atomic.LoadUint64(&tm.lock)
	}

	// Grab the lock.
	tm.mu.RLock()
}

// RUnlock releases a read lock on the mutex.
func (tm *TryRWMutex) RUnlock() {
	// Release the lock, then signal that the read lock is no longer waiting.
	tm.mu.RUnlock()
	atomic.AddUint64(&tm.lock, ^(readOffset - 1))
}

// TryLock grabs a lock on the mutex, returning false if the mutex is
// unavailable.
func (tm *TryRWMutex) TryLock() bool {
	// If there are no read locks waiting and no write locks waiting, signal
	// that a write lock is waiting and that there is contention from a Try
	// call.
	if atomic.CompareAndSwapUint64(&tm.lock, 0, writeOffset+tryOffset) {
		tm.mu.Lock()
		// Signal that the Try call contention is resolved.
		atomic.AddUint64(&tm.lock, ^(tryOffset - 1))
		return true
	}
	return false
}
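
// exampleTryLock is a minimal usage sketch (doWrite is a hypothetical
// stand-in for real work): it attempts the write lock without blocking and
// yields the processor between attempts while the mutex is unavailable.
func exampleTryLock(tm *TryRWMutex, doWrite func()) {
	for !tm.TryLock() {
		// The mutex is busy; yield rather than block in Lock.
		runtime.Gosched()
	}
	doWrite()
	tm.Unlock()
}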

// TryRLock grabs a read lock on the mutex, returning false if a write lock
// is held or waiting.
func (tm *TryRWMutex) TryRLock() bool {
	// Signal that a read lock is waiting, and that there is contention from a
	// Try call.
	v := atomic.AddUint64(&tm.lock, readOffset+tryOffset)
	// Mask the try offset when performing the comparison.
	v = v & tryMask
	if v > writeOffset {
		// If there is a write lock waiting, revert the signal and return
		// false.
		atomic.AddUint64(&tm.lock, ^(readOffset + tryOffset - 1))
		return false
	}
	// Grab the read lock and return true.
	tm.mu.RLock()
	// Signal that the Try call contention is resolved.
	atomic.AddUint64(&tm.lock, ^(tryOffset - 1))
	return true
}
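
// exampleTryRLock is a minimal usage sketch (doRead is a hypothetical
// stand-in for real work): it attempts a read lock without blocking and
// reports whether the read was performed.
func exampleTryRLock(tm *TryRWMutex, doRead func()) bool {
	if !tm.TryRLock() {
		// A write lock is held or waiting; skip the read.
		return false
	}
	doRead()
	tm.RUnlock()
	return true
}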

// Unlock releases a lock on the mutex.
func (tm *TryRWMutex) Unlock() {
	// Release the lock, then signal that the write lock is no longer held.
	tm.mu.Unlock()
	atomic.AddUint64(&tm.lock, ^(writeOffset - 1))
}