// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import "unsafe"

// This implementation depends on OS-specific implementations of
//
//	uintptr runtime·semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime·semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime·semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//
const (
	// locked is the low bit of a mutex/note key word.  A mutex key is
	// either 0 (unlocked), locked (held, no waiters), or the address of
	// a waiting M with the locked bit OR'd in (held, with waiters).
	locked uintptr = 1

	active_spin     = 4  // spin attempts before yielding the OS thread
	active_spin_cnt = 30 // iterations per procyield busy-spin
	passive_spin    = 1  // osyield attempts before queueing to sleep
)

func semacreate() uintptr
func semasleep(int64) int32
func semawakeup(mp *m)

// lock acquires mutex l.  Uncontended acquisition is a single CAS of
// l.key from 0 to locked; under contention the M spins briefly, then
// pushes itself onto the waiter list encoded in l.key (chained through
// m->nextwaitm) and sleeps on its per-M semaphore.
func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		gothrow("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if casuintptr(&l.key, 0, locked) {
		return
	}
	// Lazily create this M's semaphore the first time it has to block.
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}

	// On uniprocessor's, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomicloaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if casuintptr(&l.key, v, v|locked) {
				return
			}
			// CAS lost a race; restart the spin counter.
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = (*m)((unsafe.Pointer)(v &^ locked))
				if casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomicloaduintptr(&l.key)
				if v&locked == 0 {
					// Lock was released while we were queueing;
					// go back and try to grab it.
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

// unlock releases mutex l.  If the key is exactly locked there are no
// waiters and a CAS back to 0 suffices; otherwise pop one waiting M off
// the list in l.key and wake it via its semaphore.
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomicloaduintptr(&l.key)
		if v == locked {
			if casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (*m)((unsafe.Pointer)(v &^ locked))
			if casuintptr(&l.key, v, uintptr(unsafe.Pointer(mp.nextwaitm))) {
				// Dequeued an M.  Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		gothrow("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
// A note key is 0 (cleared), locked (wakeup delivered), or the address
// of the single M sleeping on it.

// noteclear resets n so it can be slept on and woken once more.
func noteclear(n *note) {
	n.key = 0
}

// notewakeup delivers the one allowed wakeup on n: it swaps the key to
// locked and, if an M had already registered itself there, wakes it.
// A second notewakeup on the same note throws.
func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomicloaduintptr(&n.key)
		if casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		gothrow("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

// notesleep blocks the current M (must be running on g0) until
// notewakeup(n) is called.  It registers gp.m in n.key; if the CAS
// fails the wakeup must already have happened and it returns at once.
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		gothrow("notesleep not on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			gothrow("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	semasleep(-1)
	gp.m.blocked = false
}

// notetsleep_internal is the timed-sleep core shared by notetsleep and
// notetsleepg.  It reports true if the note was signaled, false if the
// timeout ns elapsed first.  ns < 0 means sleep forever.
//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			gothrow("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		semasleep(-1)
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomicloaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				gothrow("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			gothrow("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

// notetsleep sleeps on n for at most ns nanoseconds (must be on g0,
// except during GC).  Reports true if woken, false on timeout.
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.gcing == 0 {
		gothrow("notetsleep not on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		gothrow("notetsleepg on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}