github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/sync/mutex.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package sync provides basic synchronization primitives such as mutual
// exclusion locks. Other than the Once and WaitGroup types, most are intended
// for use by low-level library routines. Higher-level synchronization is
// better done via channels and communication.
//
// Values containing the types defined in this package should not be copied.
package sync

import (
	"internal/race"
	"runtime"
	"sync/atomic"
	"unsafe"
)

func throw(string) // provided by runtime

// A Mutex is a mutual exclusion lock.
// The zero value for a Mutex is an unlocked mutex.
//
// A Mutex must not be copied after first use.
type Mutex struct {
	state int32
	sema  uint32

	// MYCODE:
	// Fuzzer instrumentation state. Record holds the lock-site record updated
	// via runtime.RecordTradOp, and Info is the per-mutex metadata passed to
	// runtime.Monitor and runtime.EnqueueBlockEntry in Lock/Unlock below.
	Record TradRecord
	Info   runtime.MuInfo
}
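
// A minimal usage sketch (not part of the original file): because a Mutex
// must not be copied after first use, embed it in a struct that is shared by
// pointer. The Counter type below is hypothetical.
//
//	type Counter struct {
//		mu Mutex
//		n  int
//	}
//
//	func (c *Counter) Inc() { // pointer receiver, so c.mu is never copied
//		c.mu.Lock()
//		defer c.mu.Unlock()
//		c.n++
//	}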

// A Locker represents an object that can be locked and unlocked.
type Locker interface {
	Lock()
	Unlock()
}
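
// A minimal sketch (not in the original source) of how Locker is typically
// consumed: code that only needs Lock/Unlock can accept a Locker, and *Mutex
// satisfies it. The withLock helper below is hypothetical.
//
//	func withLock(l Locker, fn func()) {
//		l.Lock()
//		defer l.Unlock()
//		fn()
//	}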

const (
	mutexLocked = 1 << iota // mutex is locked
	mutexWoken
	mutexStarving
	mutexWaiterShift = iota

	// Mutex fairness.
	//
	// Mutex can be in two modes of operation: normal and starvation.
	// In normal mode waiters are queued in FIFO order, but a woken-up waiter
	// does not own the mutex and competes with newly arriving goroutines over
	// the ownership. Newly arriving goroutines have an advantage -- they are
	// already running on a CPU and there can be lots of them, so a woken-up
	// waiter has a good chance of losing. In such a case it is queued at the
	// front of the wait queue. If a waiter fails to acquire the mutex for more
	// than 1ms, it switches the mutex to starvation mode.
	//
	// In starvation mode ownership of the mutex is directly handed off from
	// the unlocking goroutine to the waiter at the front of the queue.
	// Newly arriving goroutines don't try to acquire the mutex even if it
	// appears to be unlocked, and don't try to spin. Instead they queue
	// themselves at the tail of the wait queue.
	//
	// If a waiter receives ownership of the mutex and sees that either
	// (1) it is the last waiter in the queue, or (2) it waited for less than 1ms,
	// it switches the mutex back to normal operation mode.
	//
	// Normal mode has considerably better performance, as a goroutine can
	// acquire a mutex several times in a row even if there are blocked waiters.
	// Starvation mode is important to prevent pathological cases of tail latency.
	starvationThresholdNs = 1e6
)
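
// A sketch (not in the original source) of how the state word is laid out:
// bit 0 is mutexLocked, bit 1 is mutexWoken, bit 2 is mutexStarving, and the
// remaining bits count the waiters. The helper name below is hypothetical.
//
//	func describeState(state int32) (locked, woken, starving bool, waiters int32) {
//		locked = state&mutexLocked != 0
//		woken = state&mutexWoken != 0
//		starving = state&mutexStarving != 0
//		waiters = state >> mutexWaiterShift
//		return
//	}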

// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
	// MYCODE:
	// Fuzzer instrumentation: register this pending Lock as a potential
	// blocking operation, remove it again when Lock returns, and let the
	// runtime monitor this mutex's Info. In traditional-record mode the
	// call site is recorded into Record.PreLoc.
	blockEntry := runtime.EnqueueBlockEntry([]runtime.PrimInfo{&m.Info}, runtime.MuLock)
	defer runtime.DequeueBlockEntry(blockEntry)
	runtime.Monitor(&m.Info)

	if runtime.BoolRecordTrad {
		runtime.RecordTradOp(&m.Record.PreLoc)
	}

	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		if race.Enabled {
			race.Acquire(unsafe.Pointer(m))
		}
		return
	}
	// Slow path (outlined so that the fast path can be inlined)
	m.lockSlow()
}
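
// A minimal usage sketch (not part of the original file): the common pattern
// is to pair Lock with a deferred Unlock so the mutex is released on every
// return path. The variables and function below are hypothetical.
//
//	var mu Mutex
//	var balance int
//
//	func deposit(n int) {
//		mu.Lock()
//		defer mu.Unlock()
//		balance += n
//	}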

func (m *Mutex) lockSlow() {
	var waitStartTime int64
	starving := false
	awoke := false
	iter := 0
	old := m.state
	for {
		// Don't spin in starvation mode; ownership is handed off to waiters,
		// so we won't be able to acquire the mutex anyway.
		if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
			// Active spinning makes sense.
			// Try to set mutexWoken flag to inform Unlock
			// to not wake other blocked goroutines.
			if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
				atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
				awoke = true
			}
			runtime_doSpin()
			iter++
			old = m.state
			continue
		}
		new := old
		// Don't try to acquire a starving mutex; newly arriving goroutines must queue.
		if old&mutexStarving == 0 {
			new |= mutexLocked
		}
		if old&(mutexLocked|mutexStarving) != 0 {
			new += 1 << mutexWaiterShift
		}
		// The current goroutine switches the mutex to starvation mode.
		// But if the mutex is currently unlocked, don't do the switch.
		// Unlock expects that a starving mutex has waiters, which will not
		// be true in this case.
		if starving && old&mutexLocked != 0 {
			new |= mutexStarving
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				throw("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&(mutexLocked|mutexStarving) == 0 {
				break // locked the mutex with CAS
			}
			// If we were already waiting before, queue at the front of the queue.
			queueLifo := waitStartTime != 0
			if waitStartTime == 0 {
				waitStartTime = runtime_nanotime()
			}
			runtime_SemacquireMutex(&m.sema, queueLifo, 1)
			starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
			old = m.state
			if old&mutexStarving != 0 {
				// If this goroutine was woken and the mutex is in starvation mode,
				// ownership was handed off to us, but the mutex is in a somewhat
				// inconsistent state: mutexLocked is not set and we are still
				// accounted as a waiter. Fix that.
				if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
					throw("sync: inconsistent mutex state")
				}
				delta := int32(mutexLocked - 1<<mutexWaiterShift)
				if !starving || old>>mutexWaiterShift == 1 {
					// Exit starvation mode.
					// Critical to do it here and consider wait time.
					// Starvation mode is so inefficient that two goroutines
					// can go lock-step infinitely once they switch the mutex
					// to starvation mode.
					delta -= mutexStarving
				}
				atomic.AddInt32(&m.state, delta)
				break
			}
			awoke = true
			iter = 0
		} else {
			old = m.state
		}
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
}

// Unlock unlocks m.
// It is a run-time error if m is not locked on entry to Unlock.
//
// A locked Mutex is not associated with a particular goroutine.
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
	// MYCODE:
	// Fuzzer instrumentation: let the runtime monitor this mutex's Info and,
	// in traditional-record mode, record the call site of this Unlock.
	runtime.Monitor(&m.Info)
	if runtime.BoolRecordTrad {
		runtime.RecordTradOp(&m.Record.PreLoc)
	}

	if race.Enabled {
		_ = m.state
		race.Release(unsafe.Pointer(m))
	}

	// Fast path: drop lock bit.
	new := atomic.AddInt32(&m.state, -mutexLocked)
	if new != 0 {
		// Outlined slow path to allow inlining the fast path.
		// To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
		m.unlockSlow(new)
	}
}
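
// A minimal sketch (not part of the original file) of the property noted
// above: a Mutex is not tied to the goroutine that locked it, so one
// goroutine may lock it and another may unlock it. The variables and
// function below are hypothetical.
//
//	var mu Mutex
//
//	func handOff() {
//		mu.Lock()
//		go func() {
//			// ... finish work on the shared state ...
//			mu.Unlock() // released by a different goroutine than the one that locked it
//		}()
//	}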

func (m *Mutex) unlockSlow(new int32) {
	if (new+mutexLocked)&mutexLocked == 0 {
		throw("sync: unlock of unlocked mutex")
	}
	if new&mutexStarving == 0 {
		old := new
		for {
			// If there are no waiters or a goroutine has already
			// been woken or grabbed the lock, no need to wake anyone.
			// In starvation mode ownership is directly handed off from the
			// unlocking goroutine to the next waiter. We are not part of this
			// chain, since we did not observe mutexStarving when we unlocked
			// the mutex above. So get out of the way.
			if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
				return
			}
			// Grab the right to wake someone.
			new = (old - 1<<mutexWaiterShift) | mutexWoken
			if atomic.CompareAndSwapInt32(&m.state, old, new) {
				runtime_Semrelease(&m.sema, false, 1)
				return
			}
			old = m.state
		}
	} else {
		// Starving mode: hand off mutex ownership to the next waiter, and yield
		// our time slice so that the next waiter can start to run immediately.
		// Note: mutexLocked is not set, the waiter will set it after wakeup.
		// But the mutex is still considered locked if mutexStarving is set,
		// so newly arriving goroutines won't acquire it.
		runtime_Semrelease(&m.sema, true, 1)
	}
}