github.com/dorkamotorka/go/src@v0.0.0-20230614113921-187095f0e316/sync/mutex.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package sync provides basic synchronization primitives such as mutual
// exclusion locks. Other than the Once and WaitGroup types, most are intended
// for use by low-level library routines. Higher-level synchronization is
// better done via channels and communication.
//
// Values containing the types defined in this package should not be copied.
package sync

import (
	"internal/race"
	"sync/atomic"
	"unsafe"
)

// Provided by runtime via linkname.
func throw(string)
func fatal(string)

// A Mutex is a mutual exclusion lock.
// The zero value for a Mutex is an unlocked mutex.
//
// A Mutex must not be copied after first use.
//
// In the terminology of the Go memory model,
// the n'th call to Unlock “synchronizes before” the m'th call to Lock
// for any n < m.
// A successful call to TryLock is equivalent to a call to Lock.
// A failed call to TryLock does not establish any “synchronizes before”
// relation at all.
type Mutex struct {
	state int32
	sema  uint32
}

// A Locker represents an object that can be locked and unlocked.
type Locker interface {
	Lock()
	Unlock()
}

const (
	mutexLocked = 1 << iota // mutex is locked
	mutexWoken
	mutexStarving
	mutexWaiterShift = iota

	// Mutex fairness.
	//
	// A Mutex can be in one of two modes of operation: normal and starvation.
	// In normal mode waiters are queued in FIFO order, but a woken-up waiter
	// does not own the mutex and competes with newly arriving goroutines for
	// ownership. Newly arriving goroutines have an advantage -- they are
	// already running on a CPU and there can be lots of them, so a woken-up
	// waiter has a good chance of losing. In that case it is queued at the front
	// of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
	// it switches the mutex to starvation mode.
	//
	// In starvation mode ownership of the mutex is directly handed off from
	// the unlocking goroutine to the waiter at the front of the queue.
	// Newly arriving goroutines don't try to acquire the mutex even if it appears
	// to be unlocked, and don't try to spin. Instead they queue themselves at
	// the tail of the wait queue.
	//
	// If a waiter receives ownership of the mutex and sees that either
	// (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
	// it switches the mutex back to normal operation mode.
	//
	// Normal mode has considerably better performance as a goroutine can acquire
	// a mutex several times in a row even if there are blocked waiters.
	// Starvation mode is important to prevent pathological cases of tail latency.
	starvationThresholdNs = 1e6
)

// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		if race.Enabled {
			race.Acquire(unsafe.Pointer(m))
		}
		return
	}
	// Slow path (outlined so that the fast path can be inlined)
	m.lockSlow()
}
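
// Illustrative usage sketch (hypothetical names, assuming the usual pattern
// described in the Lock and Unlock doc comments): callers pair every Lock
// with exactly one Unlock, usually via defer, so the mutex is released even
// if the critical section panics. Within this package the type is written as
// Mutex; external callers use sync.Mutex.
type hitCounter struct {
	mu Mutex // guards n; the zero value is an unlocked mutex, ready to use
	n  int
}

func (c *hitCounter) inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}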
// TryLock tries to lock m and reports whether it succeeded.
//
// Note that while correct uses of TryLock do exist, they are rare,
// and use of TryLock is often a sign of a deeper problem
// in a particular use of mutexes.
func (m *Mutex) TryLock() bool {
	old := m.state
	if old&(mutexLocked|mutexStarving) != 0 {
		return false
	}

	// There may be a goroutine waiting for the mutex, but we are
	// running now and can try to grab the mutex before that
	// goroutine wakes up.
	if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
		return false
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
	return true
}

func (m *Mutex) lockSlow() {
	var waitStartTime int64
	starving := false
	awoke := false
	iter := 0
	old := m.state
	for {
		// Don't spin in starvation mode; ownership is handed off to waiters,
		// so we won't be able to acquire the mutex anyway.
		if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
			// Active spinning makes sense.
			// Try to set mutexWoken flag to inform Unlock
			// to not wake other blocked goroutines.
			if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
				atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
				awoke = true
			}
			runtime_doSpin()
			iter++
			old = m.state
			continue
		}
		new := old
		// Don't try to acquire a starving mutex; newly arriving goroutines must queue.
		if old&mutexStarving == 0 {
			new |= mutexLocked
		}
		if old&(mutexLocked|mutexStarving) != 0 {
			new += 1 << mutexWaiterShift
		}
		// The current goroutine switches the mutex to starvation mode.
		// But if the mutex is currently unlocked, don't do the switch.
		// Unlock expects that a starving mutex has waiters, which will not
		// be true in this case.
		if starving && old&mutexLocked != 0 {
			new |= mutexStarving
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				throw("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&(mutexLocked|mutexStarving) == 0 {
				break // locked the mutex with CAS
			}
			// If we were already waiting before, queue at the front of the queue.
			queueLifo := waitStartTime != 0
			if waitStartTime == 0 {
				waitStartTime = runtime_nanotime()
			}
			runtime_SemacquireMutex(&m.sema, queueLifo, 1)
			starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
			old = m.state
			if old&mutexStarving != 0 {
				// If this goroutine was woken and the mutex is in starvation mode,
				// ownership was handed off to us but the mutex is in a somewhat
				// inconsistent state: mutexLocked is not set and we are still
				// accounted as a waiter. Fix that.
				if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
					throw("sync: inconsistent mutex state")
				}
				delta := int32(mutexLocked - 1<<mutexWaiterShift)
				if !starving || old>>mutexWaiterShift == 1 {
					// Exit starvation mode.
					// It is critical to do it here and to consider the wait time.
					// Starvation mode is so inefficient that two goroutines
					// can go lock-step indefinitely once they switch the mutex
					// to starvation mode.
					delta -= mutexStarving
				}
				atomic.AddInt32(&m.state, delta)
				break
			}
			awoke = true
			iter = 0
		} else {
			old = m.state
		}
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
}
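
// Illustrative TryLock sketch (hypothetical names, reusing the hitCounter
// type sketched above): as the TryLock doc comment notes, correct uses are
// rare. One legitimate pattern is best-effort, optional work, such as
// skipping an update when the lock is already contended rather than blocking
// for it.
func (c *hitCounter) tryInc() bool {
	if !c.mu.TryLock() {
		return false // contended; the caller can skip or retry later
	}
	defer c.mu.Unlock()
	c.n++
	return true
}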
// Unlock unlocks m.
// It is a run-time error if m is not locked on entry to Unlock.
//
// A locked Mutex is not associated with a particular goroutine.
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
	if race.Enabled {
		_ = m.state
		race.Release(unsafe.Pointer(m))
	}

	// Fast path: drop lock bit.
	new := atomic.AddInt32(&m.state, -mutexLocked)
	if new != 0 {
		// Outlined slow path to allow inlining the fast path.
		// To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
		m.unlockSlow(new)
	}
}

func (m *Mutex) unlockSlow(new int32) {
	if (new+mutexLocked)&mutexLocked == 0 {
		fatal("sync: unlock of unlocked mutex")
	}
	if new&mutexStarving == 0 {
		old := new
		for {
			// If there are no waiters, or a goroutine has already
			// been woken or has grabbed the lock, there is no need to wake anyone.
			// In starvation mode ownership is directly handed off from the unlocking
			// goroutine to the next waiter. We are not part of this chain,
			// since we did not observe mutexStarving when we unlocked the mutex above.
			// So get out of the way.
			if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
				return
			}
			// Grab the right to wake someone.
			new = (old - 1<<mutexWaiterShift) | mutexWoken
			if atomic.CompareAndSwapInt32(&m.state, old, new) {
				runtime_Semrelease(&m.sema, false, 1)
				return
			}
			old = m.state
		}
	} else {
		// Starvation mode: hand off mutex ownership to the next waiter, and yield
		// our time slice so that the next waiter can start running immediately.
		// Note: mutexLocked is not set; the waiter will set it after wakeup.
		// But the mutex is still considered locked if mutexStarving is set,
		// so newly arriving goroutines won't acquire it.
		runtime_Semrelease(&m.sema, true, 1)
	}
}
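
// Illustrative sketch (hypothetical names): as documented on Unlock, a locked
// Mutex is not tied to the goroutine that locked it, so one goroutine may
// lock it and arrange for another goroutine to unlock it.
func lockThenHandOff(mu *Mutex, work func()) {
	mu.Lock() // acquired in the calling goroutine...
	go func() {
		defer mu.Unlock() // ...released by the worker goroutine, which Unlock permits
		work()
	}()
}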