// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build dragonfly || freebsd || linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

// We use the uintptr mutex.key and note.key as a uint32.
//
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
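
// The slow paths above rely entirely on the futexsleep/futexwakeup hooks
// described at the top of this file. As a rough sketch only (not this
// file's code; the real hooks live in the OS-specific sources, and the
// futex wrapper, the _FUTEX_WAIT_PRIVATE/_FUTEX_WAKE_PRIVATE constants,
// and the timespec helper used below are assumed from the Linux port),
// they map to FUTEX_WAIT/FUTEX_WAKE on the same word that lock2 and
// unlock2 update:
//
//	func futexsleep(addr *uint32, val uint32, ns int64) {
//		if ns < 0 {
//			futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, nil, nil, 0)
//			return
//		}
//		var ts timespec
//		ts.setNsec(ns)
//		futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(&ts), nil, 0)
//	}
//
//	func futexwakeup(addr *uint32, cnt uint32) {
//		futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
//	}
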
// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}

// beforeIdle and checkTimeouts are no-ops here; they are hooks used by
// other lock implementations (notably js/wasm).
func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}
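
// For reference, the note primitives above implement a one-shot event:
// a thread arms the note with noteclear, blocks with notesleep (or one
// of the timed/user-g variants), and exactly one other thread signals it
// with notewakeup. A minimal, hypothetical usage sketch (real callers,
// such as M parking in proc.go, follow this shape with extra bookkeeping):
//
//	var parked note
//	noteclear(&parked)   // arm the note before anyone can wake it
//	// ... publish &parked to the waking thread ...
//	notesleep(&parked)   // must run on g0; returns once the note is woken
//
//	// On the other thread, exactly once per noteclear:
//	notewakeup(&parked)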