github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/lock_sema.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
const (
	locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = muintptr(v &^ locked)
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

// We might not be holding a p in this code.
//
//go:nowritebarrier
func unlock2(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == locked {
			if atomic.Casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = muintptr(v &^ locked).ptr()
			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
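// Note on the mutex representation (an editorial summary of the code above,
// not a comment from the upstream file): lock2 and unlock2 pack the entire
// mutex state into the single word l.key. The low bit is the locked flag;
// because every *m is at least word-aligned, the remaining bits can hold the
// head of the waiter list:
//
//	l.key == 0                      unlocked, no waiters
//	l.key == locked                 locked, no waiters
//	l.key == uintptr(mp) | locked   locked, mp heads the waiter list,
//	                                chained through mp.nextwaitm
//
// lock2 enqueues an M by CASing it onto the head of the list; unlock2
// dequeues the head and wakes it with semawakeup.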
// One-time notifications.
func noteclear(n *note) {
	if GOOS == "aix" {
		// On AIX, semaphores might not synchronize the memory in some
		// rare cases. See issue #30189.
		atomic.Storeuintptr(&n.key, 0)
	} else {
		n.key = 0
	}
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}
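// Note on the note representation (an editorial summary of the code above
// and below, not a comment from the upstream file): n.key moves through
// three states. noteclear resets it to 0, the sleepers register by CASing in
// the sleeping M's pointer, and notewakeup CAS-loops until it has stored
// locked:
//
//	n.key == 0             cleared; no waiter registered and no wakeup yet
//	n.key == uintptr(mp)   mp is (or soon will be) asleep on its semaphore
//	n.key == locked        notewakeup has fired; a second wakeup throws
//
// Whichever side arrives second finishes the handshake: notewakeup calls
// semawakeup if it finds a registered M, and a sleeper that finds locked
// skips (or immediately satisfies) its sleep.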
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}

func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}
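// exampleOneTimeNote is an illustrative sketch added for exposition; it is
// not part of the original file, and the function name and the startWaker
// parameter are hypothetical. It shows the one-shot protocol built from the
// primitives above: arm the note, arrange for exactly one notewakeup from
// another M, then block until it arrives. notesleep must run on g0, so a
// real caller would reach this via systemstack or mcall.
func exampleOneTimeNote(n *note, startWaker func(*note)) {
	noteclear(n)  // reset the note to its unsignaled state
	startWaker(n) // arrange for some other M to call notewakeup(n) exactly once
	notesleep(n)  // block on this M's semaphore until the wakeup arrives
}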