github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/sync/rwmutex_unsafe.go

// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2019 The gVisor Authors.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This is mostly copied from the standard library's sync/rwmutex.go.
//
// Happens-before relationships indicated to the race detector:
//	- Unlock -> Lock (via writerSem)
//	- Unlock -> RLock (via readerSem)
//	- RUnlock -> Lock (via writerSem)
//	- DowngradeLock -> RLock (via readerSem)

package sync

import (
	"sync/atomic"
	"unsafe"
)

// CrossGoroutineRWMutex is equivalent to RWMutex, but it need not be unlocked
// by the same goroutine that locked the mutex.
type CrossGoroutineRWMutex struct {
	// w is held if there are pending writers
	//
	// We use CrossGoroutineMutex rather than Mutex because the lock
	// annotation instrumentation in Mutex will trigger false positives in
	// the race detector when called inside of RaceDisable.
	w           CrossGoroutineMutex
	writerSem   uint32 // semaphore for writers to wait for completing readers
	readerSem   uint32 // semaphore for readers to wait for completing writers
	readerCount int32  // number of pending readers
	readerWait  int32  // number of departing readers
}

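// rwmutexMaxReaders bounds the reader count. A pending or active writer
// subtracts rwmutexMaxReaders from readerCount, driving it negative; readers
// that observe a negative count know a writer has announced itself.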
const rwmutexMaxReaders = 1 << 30

// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryRLock() bool {
	if RaceEnabled {
		RaceDisable()
	}
	for {
		rc := atomic.LoadInt32(&rw.readerCount)
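		// A negative readerCount means a writer is pending or active;
		// TryRLock must not block, so give up immediately.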
		if rc < 0 {
			if RaceEnabled {
				RaceEnable()
			}
			return false
		}
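		// Attempt to register as a reader; if readerCount changed
		// concurrently, reload and retry.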
		if !atomic.CompareAndSwapInt32(&rw.readerCount, rc, rc+1) {
			continue
		}
		if RaceEnabled {
			RaceEnable()
			RaceAcquire(unsafe.Pointer(&rw.readerSem))
		}
		return true
	}
}

// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) RLock() {
	if RaceEnabled {
		RaceDisable()
	}
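	// A reader registers by incrementing readerCount. A negative result
	// means a writer has announced itself by offsetting readerCount by
	// -rwmutexMaxReaders, so block until the writer is done.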
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		semacquire(&rw.readerSem)
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.readerSem))
	}
}

// RUnlock undoes a single RLock call.
//
// Preconditions:
//   - rw is locked for reading.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) RUnlock() {
	if RaceEnabled {
		RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
		RaceDisable()
	}
	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
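		// r+1 == 0 means readerCount was zero, i.e. no read lock was
		// held; r+1 == -rwmutexMaxReaders means only a writer's
		// announcement was outstanding. Either way this RUnlock has no
		// matching RLock.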
		if r+1 == 0 || r+1 == -rwmutexMaxReaders {
			panic("RUnlock of unlocked RWMutex")
		}
		// A writer is pending.
		if atomic.AddInt32(&rw.readerWait, -1) == 0 {
			// The last reader unblocks the writer.
			semrelease(&rw.writerSem, false, 0)
		}
	}
	if RaceEnabled {
		RaceEnable()
	}
}

// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryLock() bool {
	if RaceEnabled {
		RaceDisable()
	}
	// First, resolve competition with other writers.
	if !rw.w.TryLock() {
		if RaceEnabled {
			RaceEnable()
		}
		return false
	}
	// Only proceed if there are no readers.
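	// A single CAS both verifies that no readers hold the lock and
	// announces the writer by swinging readerCount from 0 to
	// -rwmutexMaxReaders.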
	if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
		rw.w.Unlock()
		if RaceEnabled {
			RaceEnable()
		}
		return false
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.writerSem))
	}
	return true
}

// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) Lock() {
	if RaceEnabled {
		RaceDisable()
	}
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
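	// r is the number of readers that acquired the lock before the
	// announcement. Readers that release after the announcement decrement
	// readerWait, so the sum below is zero once all of them have left; if
	// any remain, block on writerSem until the last one signals.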
	// Wait for active readers.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		semacquire(&rw.writerSem)
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.writerSem))
	}
}

// Unlock unlocks rw for writing.
//
// Preconditions:
//   - rw is locked for writing.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) Unlock() {
	if RaceEnabled {
		RaceRelease(unsafe.Pointer(&rw.writerSem))
		RaceRelease(unsafe.Pointer(&rw.readerSem))
		RaceDisable()
	}
	// Announce to readers there is no active writer.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
	if r >= rwmutexMaxReaders {
		panic("Unlock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any.
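	// r is the number of readers that arrived while the write lock was
	// held; each of them is blocked (or about to block) on readerSem and
	// gets exactly one release.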
	for i := 0; i < int(r); i++ {
		semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
	if RaceEnabled {
		RaceEnable()
	}
}

// DowngradeLock atomically unlocks rw for writing and locks it for reading.
//
// Preconditions:
//   - rw is locked for writing.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) DowngradeLock() {
	if RaceEnabled {
		RaceRelease(unsafe.Pointer(&rw.readerSem))
		RaceDisable()
	}
	// Announce to readers there is no active writer and one additional reader.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
	if r >= rwmutexMaxReaders+1 {
		panic("DowngradeLock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any. Note that this loop starts at 1 since
	// r includes this goroutine.
	for i := 1; i < int(r); i++ {
		semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed to rw.w.Lock(). Note that they will still
	// block on rw.writerSem since at least this reader exists, such that
	// DowngradeLock() is atomic with the previous write lock.
	rw.w.Unlock()
	if RaceEnabled {
		RaceEnable()
	}
}

// A RWMutex is a reader/writer mutual exclusion lock. The lock can be held by
// an arbitrary number of readers or a single writer. The zero value for a
// RWMutex is an unlocked mutex.
//
// A RWMutex must not be copied after first use.
//
// If a goroutine holds a RWMutex for reading and another goroutine might call
// Lock, no goroutine should expect to be able to acquire a read lock until the
// initial read lock is released. In particular, this prohibits recursive read
// locking. This is to ensure that the lock eventually becomes available; a
// blocked Lock call excludes new readers from acquiring the lock.
//
// An RWMutex must be unlocked by the same goroutine that locked it. This
// invariant is enforced with the 'checklocks' build tag.
type RWMutex struct {
	m CrossGoroutineRWMutex
}

// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *RWMutex) TryRLock() bool {
	// Note lock first to enforce proper locking even if unsuccessful.
	noteLock(unsafe.Pointer(rw))
	locked := rw.m.TryRLock()
	if !locked {
		noteUnlock(unsafe.Pointer(rw))
	}
	return locked
}

// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
// +checklocksignore
func (rw *RWMutex) RLock() {
	noteLock(unsafe.Pointer(rw))
	rw.m.RLock()
}

// RUnlock undoes a single RLock call.
//
// Preconditions:
//   - rw is locked for reading.
//   - rw was locked by this goroutine.
//
// +checklocksignore
func (rw *RWMutex) RUnlock() {
	rw.m.RUnlock()
	noteUnlock(unsafe.Pointer(rw))
}

// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *RWMutex) TryLock() bool {
	// Note lock first to enforce proper locking even if unsuccessful.
	noteLock(unsafe.Pointer(rw))
	locked := rw.m.TryLock()
	if !locked {
		noteUnlock(unsafe.Pointer(rw))
	}
	return locked
}

// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
// +checklocksignore
func (rw *RWMutex) Lock() {
	noteLock(unsafe.Pointer(rw))
	rw.m.Lock()
}

// Unlock unlocks rw for writing.
//
// Preconditions:
//   - rw is locked for writing.
//   - rw was locked by this goroutine.
//
// +checklocksignore
func (rw *RWMutex) Unlock() {
	rw.m.Unlock()
	noteUnlock(unsafe.Pointer(rw))
}

// DowngradeLock atomically unlocks rw for writing and locks it for reading.
//
// Preconditions:
//   - rw is locked for writing.
//
// +checklocksignore
func (rw *RWMutex) DowngradeLock() {
	// No note change for DowngradeLock.
	rw.m.DowngradeLock()
}
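
// Illustrative usage sketch (not part of the original file; the locking
// sequence below is hypothetical). DowngradeLock lets a writer keep reading
// the state it just published without opening a window in which another
// writer could run:
//
//	var mu RWMutex
//	mu.Lock()
//	// ... mutate shared state ...
//	mu.DowngradeLock()
//	// ... read the state just written; other writers stay excluded ...
//	mu.RUnlock()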