github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sync/rwmutex_unsafe.go

// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2019 The gVisor Authors.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This is mostly copied from the standard library's sync/rwmutex.go.
//
// Happens-before relationships indicated to the race detector:
// - Unlock -> Lock (via writerSem)
// - Unlock -> RLock (via readerSem)
// - RUnlock -> Lock (via writerSem)
// - DowngradeLock -> RLock (via readerSem)

package sync

import (
	"sync/atomic"
	"unsafe"
)
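
// The happens-before edges listed at the top of this file give the usual
// RWMutex visibility guarantee: a write made before Unlock is observed by a
// reader whose RLock completes afterwards. Below is a minimal, hypothetical
// sketch of that guarantee; rwVisibilitySketch, data, and done are
// illustrative names only.
func rwVisibilitySketch() int {
	var rw RWMutex
	data := 0
	done := make(chan struct{})

	rw.Lock()
	go func() {
		// Unlock -> RLock (via readerSem): once RLock returns, the write
		// to data below is guaranteed to be visible to this goroutine.
		rw.RLock()
		_ = data
		rw.RUnlock()
		close(done)
	}()
	data = 42 // published to readers by the Unlock below.
	rw.Unlock()
	<-done
	return data
}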

// CrossGoroutineRWMutex is equivalent to RWMutex, but it need not be unlocked
// by the same goroutine that locked the mutex.
type CrossGoroutineRWMutex struct {
	// w is held if there are pending writers
	//
	// We use CrossGoroutineMutex rather than Mutex because the lock
	// annotation instrumentation in Mutex will trigger false positives in
	// the race detector when called inside of RaceDisable.
	w           CrossGoroutineMutex
	writerSem   uint32 // semaphore for writers to wait for completing readers
	readerSem   uint32 // semaphore for readers to wait for completing writers
	readerCount int32  // number of pending readers
	readerWait  int32  // number of departing readers
}
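
// A minimal, hypothetical sketch of the property documented above: a
// CrossGoroutineRWMutex may be locked on one goroutine and unlocked on
// another (RWMutex, defined below, deliberately forbids this);
// crossGoroutineHandoffSketch is an illustrative name only.
func crossGoroutineHandoffSketch() {
	var rw CrossGoroutineRWMutex
	done := make(chan struct{})

	rw.Lock() // acquired on this goroutine...
	go func() {
		rw.Unlock() // ...released on another, which CrossGoroutineRWMutex permits.
		close(done)
	}()
	<-done
}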

const rwmutexMaxReaders = 1 << 30

// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryRLock() bool {
	if RaceEnabled {
		RaceDisable()
	}
	for {
		rc := atomic.LoadInt32(&rw.readerCount)
		if rc < 0 {
			if RaceEnabled {
				RaceEnable()
			}
			return false
		}
		if !atomic.CompareAndSwapInt32(&rw.readerCount, rc, rc+1) {
			continue
		}
		if RaceEnabled {
			RaceEnable()
			RaceAcquire(unsafe.Pointer(&rw.readerSem))
		}
		return true
	}
}
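
// A minimal, hypothetical sketch of the non-blocking read pattern TryRLock
// enables: attempt to read shared state and fall back to a default when a
// writer holds, or is waiting for, the lock; tryReadSketch and its
// parameters are illustrative only.
func tryReadSketch(rw *CrossGoroutineRWMutex, read func() int, fallback int) int {
	if !rw.TryRLock() {
		// A writer holds the lock or is pending; don't block.
		return fallback
	}
	defer rw.RUnlock()
	return read()
}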

// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) RLock() {
	if RaceEnabled {
		RaceDisable()
	}
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		semacquire(&rw.readerSem)
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.readerSem))
	}
}
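
// Note on the recursive read-locking warning above: if a goroutine already
// holds the read lock and a writer then calls Lock, readerCount goes negative
// and the writer blocks on writerSem, so any further RLock call blocks on
// readerSem. A second RLock from the goroutine that already holds the read
// lock therefore waits on the writer, while the writer waits on that
// goroutine's RUnlock, and neither can make progress.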

// RUnlock undoes a single RLock call.
//
// Preconditions:
// * rw is locked for reading.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) RUnlock() {
	if RaceEnabled {
		RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
		RaceDisable()
	}
	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
		if r+1 == 0 || r+1 == -rwmutexMaxReaders {
			panic("RUnlock of unlocked RWMutex")
		}
		// A writer is pending.
		if atomic.AddInt32(&rw.readerWait, -1) == 0 {
			// The last reader unblocks the writer.
			semrelease(&rw.writerSem, false, 0)
		}
	}
	if RaceEnabled {
		RaceEnable()
	}
}

// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryLock() bool {
	if RaceEnabled {
		RaceDisable()
	}
	// First, resolve competition with other writers.
	if !rw.w.TryLock() {
		if RaceEnabled {
			RaceEnable()
		}
		return false
	}
	// Only proceed if there are no readers.
	if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
		rw.w.Unlock()
		if RaceEnabled {
			RaceEnable()
		}
		return false
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.writerSem))
	}
	return true
}
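
// A minimal, hypothetical sketch of the non-blocking write pattern TryLock
// enables: perform an update only if the write lock can be taken immediately,
// reporting whether the update ran; tryUpdateSketch and its parameters are
// illustrative only.
func tryUpdateSketch(rw *CrossGoroutineRWMutex, update func()) bool {
	if !rw.TryLock() {
		// Another writer holds rw.w, or readers are still active.
		return false
	}
	update()
	rw.Unlock()
	return true
}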

// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) Lock() {
	if RaceEnabled {
		RaceDisable()
	}
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
	// Wait for active readers.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		semacquire(&rw.writerSem)
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.writerSem))
	}
}
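
// A concrete walk through the bookkeeping above, as a worked example: suppose
// 2 readers hold the lock when Lock is called. AddInt32(-rwmutexMaxReaders)
// leaves readerCount at 2-rwmutexMaxReaders, which is negative, so new RLock
// callers see a pending writer and queue on readerSem. r is 2, so readerWait
// becomes 2 and the writer sleeps on writerSem. Each RUnlock decrements both
// readerCount and readerWait; the RUnlock that drives readerWait to 0
// releases writerSem and Lock returns.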

// Unlock unlocks rw for writing.
//
// Preconditions:
// * rw is locked for writing.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) Unlock() {
	if RaceEnabled {
		RaceRelease(unsafe.Pointer(&rw.writerSem))
		RaceRelease(unsafe.Pointer(&rw.readerSem))
		RaceDisable()
	}
	// Announce to readers there is no active writer.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
	if r >= rwmutexMaxReaders {
		panic("Unlock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any.
	for i := 0; i < int(r); i++ {
		semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
	if RaceEnabled {
		RaceEnable()
	}
}

// DowngradeLock atomically unlocks rw for writing and locks it for reading.
//
// Preconditions:
// * rw is locked for writing.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) DowngradeLock() {
	if RaceEnabled {
		RaceRelease(unsafe.Pointer(&rw.readerSem))
		RaceDisable()
	}
	// Announce to readers there is no active writer and one additional reader.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
	if r >= rwmutexMaxReaders+1 {
		panic("DowngradeLock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any. Note that this loop starts at 1 since r
	// includes this goroutine.
	for i := 1; i < int(r); i++ {
		semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed to rw.w.Lock(). Note that they will still
	// block on rw.writerSem since at least this reader exists, such that
	// DowngradeLock() is atomic with the previous write lock.
	rw.w.Unlock()
	if RaceEnabled {
		RaceEnable()
	}
}

// A RWMutex is a reader/writer mutual exclusion lock. The lock can be held by
// an arbitrary number of readers or a single writer. The zero value for a
// RWMutex is an unlocked mutex.
//
// A RWMutex must not be copied after first use.
//
// If a goroutine holds a RWMutex for reading and another goroutine might call
// Lock, no goroutine should expect to be able to acquire a read lock until the
// initial read lock is released. In particular, this prohibits recursive read
// locking. This is to ensure that the lock eventually becomes available; a
// blocked Lock call excludes new readers from acquiring the lock.
//
// An RWMutex must be unlocked by the same goroutine that locked it. This
// invariant is enforced with the 'checklocks' build tag.
type RWMutex struct {
	m CrossGoroutineRWMutex
}
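
// A minimal, hypothetical sketch of ordinary RWMutex usage: writers take the
// exclusive lock to mutate shared state, readers take the shared lock to
// observe it, and each goroutine releases the lock it acquired (unlike
// CrossGoroutineRWMutex, the unlock must happen on the locking goroutine);
// counterSketch and its methods are illustrative only.
type counterSketch struct {
	mu RWMutex
	n  int
}

func (c *counterSketch) add(delta int) {
	c.mu.Lock()
	c.n += delta
	c.mu.Unlock()
}

func (c *counterSketch) get() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.n
}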

// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *RWMutex) TryRLock() bool {
	// Note lock first to enforce proper locking even if unsuccessful.
	noteLock(unsafe.Pointer(rw))
	locked := rw.m.TryRLock()
	if !locked {
		noteUnlock(unsafe.Pointer(rw))
	}
	return locked
}

// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
// +checklocksignore
func (rw *RWMutex) RLock() {
	noteLock(unsafe.Pointer(rw))
	rw.m.RLock()
}

// RUnlock undoes a single RLock call.
//
// Preconditions:
// * rw is locked for reading.
// * rw was locked by this goroutine.
// +checklocksignore
func (rw *RWMutex) RUnlock() {
	rw.m.RUnlock()
	noteUnlock(unsafe.Pointer(rw))
}

// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *RWMutex) TryLock() bool {
	// Note lock first to enforce proper locking even if unsuccessful.
	noteLock(unsafe.Pointer(rw))
	locked := rw.m.TryLock()
	if !locked {
		noteUnlock(unsafe.Pointer(rw))
	}
	return locked
}

// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
// +checklocksignore
func (rw *RWMutex) Lock() {
	noteLock(unsafe.Pointer(rw))
	rw.m.Lock()
}

// Unlock unlocks rw for writing.
//
// Preconditions:
// * rw is locked for writing.
// * rw was locked by this goroutine.
// +checklocksignore
func (rw *RWMutex) Unlock() {
	rw.m.Unlock()
	noteUnlock(unsafe.Pointer(rw))
}

// DowngradeLock atomically unlocks rw for writing and locks it for reading.
//
// Preconditions:
// * rw is locked for writing.
// +checklocksignore
func (rw *RWMutex) DowngradeLock() {
	// No note change for DowngradeLock.
	rw.m.DowngradeLock()
}
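
// A minimal, hypothetical sketch of the downgrade pattern DowngradeLock
// supports: publish an update under the write lock, then continue reading the
// published state under a read lock without a window in which another writer
// could intervene; publishThenReadSketch and its parameters are illustrative
// only.
func publishThenReadSketch(rw *RWMutex, update func(), read func()) {
	rw.Lock()
	update() // exclusive: no readers or other writers.
	// Atomically trade the write lock for a read lock; no other writer can
	// acquire the lock before this goroutine's read lock is in place.
	rw.DowngradeLock()
	read() // shared: other readers may run concurrently.
	rw.RUnlock()
}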