github.com/brownsys/tracing-framework-go@v0.0.0-20161210174012-0542a62412fe/go/darwin_amd64/src/runtime/sema.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Semaphore implementation exposed to Go.
// Intended use is to provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Asynchronous semaphore for sync.Mutex.

type semaRoot struct {
	lock  mutex
	head  *sudog
	tail  *sudog
	nwait uint32 // Number of waiters. Read w/o the lock.
}

// Prime to not correlate with any user patterns.
const semTabSize = 251

var semtable [semTabSize]struct {
	root semaRoot
	pad  [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
}

//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32) {
	semacquire(addr, true)
}

//go:linkname net_runtime_Semacquire net.runtime_Semacquire
func net_runtime_Semacquire(addr *uint32) {
	semacquire(addr, true)
}

//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
func sync_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}

//go:linkname net_runtime_Semrelease net.runtime_Semrelease
func net_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}

func readyWithTime(s *sudog, traceskip int) {
	if s.releasetime != 0 {
		s.releasetime = cputicks()
	}
	goready(s.g, traceskip)
}

// Called from runtime.
func semacquire(addr *uint32, profile bool) {
	gp := getg()
	if gp != gp.m.curg {
		throw("semacquire not on the G stack")
	}

	// Easy case.
	if cansemacquire(addr) {
		return
	}

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	s := acquireSudog()
	root := semroot(addr)
	t0 := int64(0)
	s.releasetime = 0
	if profile && blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	for {
		lock(&root.lock)
		// Add ourselves to nwait to disable "easy case" in semrelease.
		atomic.Xadd(&root.nwait, 1)
		// Check cansemacquire to avoid missed wakeup.
		if cansemacquire(addr) {
			atomic.Xadd(&root.nwait, -1)
			unlock(&root.lock)
			break
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
		root.queue(addr, s)
		goparkunlock(&root.lock, "semacquire", traceEvGoBlockSync, 4)
		if cansemacquire(addr) {
			break
		}
	}
	if s.releasetime > 0 {
		blockevent(s.releasetime-t0, 3)
	}
	releaseSudog(s)
}
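// The loop above is the heart of the missed-wakeup avoidance: a waiter
// announces itself in nwait *before* re-checking the count, so a releaser
// that has already bumped the count can never conclude "no waiters" and
// skip the wakeup. As a hedged illustration only, a user-space analogue of
// the same protocol might look like the sketch below; the sema type, its
// fields, and its methods are hypothetical, with a channel standing in for
// goparkunlock/goready and a sync.Mutex for semaRoot.lock. (Extracted as a
// standalone file it needs imports "sync" and "sync/atomic".)
//
//	type sema struct {
//		count uint32          // semaphore value, like *addr
//		nwait uint32          // waiter count, read without the lock
//		mu    sync.Mutex      // guards waitq, like semaRoot.lock
//		waitq []chan struct{} // parked waiters, like the sudog queue
//	}
//
//	func (s *sema) acquire() {
//		for {
//			// Easy case, like cansemacquire.
//			if v := atomic.LoadUint32(&s.count); v > 0 &&
//				atomic.CompareAndSwapUint32(&s.count, v, v-1) {
//				return
//			}
//			s.mu.Lock()
//			atomic.AddUint32(&s.nwait, 1) // disable release's easy case
//			// Re-check after announcing ourselves, so a concurrent
//			// release cannot miss us.
//			if v := atomic.LoadUint32(&s.count); v > 0 &&
//				atomic.CompareAndSwapUint32(&s.count, v, v-1) {
//				atomic.AddUint32(&s.nwait, ^uint32(0)) // nwait--
//				s.mu.Unlock()
//				return
//			}
//			ch := make(chan struct{})
//			s.waitq = append(s.waitq, ch)
//			s.mu.Unlock()
//			<-ch // stands in for goparkunlock; loop and retry on wakeup
//		}
//	}
//
//	func (s *sema) release() {
//		atomic.AddUint32(&s.count, 1)
//		// Like semrelease, this check must come after the add.
//		if atomic.LoadUint32(&s.nwait) == 0 {
//			return // easy case: nobody is waiting
//		}
//		s.mu.Lock()
//		if len(s.waitq) > 0 {
//			atomic.AddUint32(&s.nwait, ^uint32(0))
//			ch := s.waitq[0]
//			s.waitq = s.waitq[1:]
//			s.mu.Unlock()
//			close(ch) // stands in for goready
//			return
//		}
//		s.mu.Unlock()
//	}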
func semrelease(addr *uint32) {
	root := semroot(addr)
	atomic.Xadd(addr, 1)

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if atomic.Load(&root.nwait) == 0 {
		return
	}

	// Harder case: search for a waiter and wake it.
	lock(&root.lock)
	if atomic.Load(&root.nwait) == 0 {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		unlock(&root.lock)
		return
	}
	s := root.head
	for ; s != nil; s = s.next {
		if s.elem == unsafe.Pointer(addr) {
			atomic.Xadd(&root.nwait, -1)
			root.dequeue(s)
			break
		}
	}
	unlock(&root.lock)
	if s != nil {
		readyWithTime(s, 5)
	}
}

func semroot(addr *uint32) *semaRoot {
	return &semtable[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
}

func cansemacquire(addr *uint32) bool {
	for {
		v := atomic.Load(addr)
		if v == 0 {
			return false
		}
		if atomic.Cas(addr, v, v-1) {
			return true
		}
	}
}

func (root *semaRoot) queue(addr *uint32, s *sudog) {
	s.g = getg()
	s.elem = unsafe.Pointer(addr)
	s.next = nil
	s.prev = root.tail
	if root.tail != nil {
		root.tail.next = s
	} else {
		root.head = s
	}
	root.tail = s
}

func (root *semaRoot) dequeue(s *sudog) {
	if s.next != nil {
		s.next.prev = s.prev
	} else {
		root.tail = s.prev
	}
	if s.prev != nil {
		s.prev.next = s.next
	} else {
		root.head = s.next
	}
	s.elem = nil
	s.next = nil
	s.prev = nil
}

// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
type notifyList struct {
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	wait uint32

	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	//
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
	notify uint32

	// List of parked waiters.
	lock mutex
	head *sudog
	tail *sudog
}

// less checks if a < b, considering a & b running counts that may overflow the
// 32-bit range, and that their "unwrapped" difference is always less than 2^31.
func less(a, b uint32) bool {
	return int32(a-b) < 0
}
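// For example, less orders tickets correctly even across the 32-bit
// wraparound, because the subtraction is done modulo 2^32 before the
// signed comparison:
//
//	less(5, 10)         // true:  int32(5-10) = -5
//	less(^uint32(0), 3) // true:  ticket 0xffffffff was issued just before wait wrapped to 0
//	less(3, ^uint32(0)) // false: int32(3-0xffffffff) = 4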
// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func notifyListAdd(l *notifyList) uint32 {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	return atomic.Xadd(&l.wait, 1) - 1
}

// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//go:linkname notifyListWait sync.runtime_notifyListWait
func notifyListWait(l *notifyList, t uint32) {
	lock(&l.lock)

	// Return right away if this ticket has already been notified.
	if less(t, l.notify) {
		unlock(&l.lock)
		return
	}

	// Enqueue itself.
	s := acquireSudog()
	s.g = getg()
	s.ticket = t
	s.releasetime = 0
	t0 := int64(0)
	if blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	if l.tail == nil {
		l.head = s
	} else {
		l.tail.next = s
	}
	l.tail = s
	goparkunlock(&l.lock, "semacquire", traceEvGoBlockCond, 3)
	if t0 != 0 {
		blockevent(s.releasetime-t0, 2)
	}
	releaseSudog(s)
}

// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func notifyListNotifyAll(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
		return
	}

	// Pull the list out into a local variable, waiters will be readied
	// outside the lock.
	lock(&l.lock)
	s := l.head
	l.head = nil
	l.tail = nil

	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	atomic.Store(&l.notify, atomic.Load(&l.wait))
	unlock(&l.lock)

	// Go through the local list and ready all waiters.
	for s != nil {
		next := s.next
		s.next = nil
		readyWithTime(s, 4)
		s = next
	}
}

// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func notifyListNotifyOne(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
		return
	}

	lock(&l.lock)

	// Re-check under the lock if we need to do anything.
	t := l.notify
	if t == atomic.Load(&l.wait) {
		unlock(&l.lock)
		return
	}

	// Update the next notify ticket number, and try to find the G that
	// needs to be notified. If it hasn't made it to the list yet we won't
	// find it, but it won't park itself once it sees the new notify number.
	atomic.Store(&l.notify, t+1)
	for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
		if s.ticket == t {
			n := s.next
			if p != nil {
				p.next = n
			} else {
				l.head = n
			}
			if n == nil {
				l.tail = p
			}
			unlock(&l.lock)
			s.next = nil
			readyWithTime(s, 4)
			return
		}
	}
	unlock(&l.lock)
}

//go:linkname notifyListCheck sync.runtime_notifyListCheck
func notifyListCheck(sz uintptr) {
	if sz != unsafe.Sizeof(notifyList{}) {
		print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
		throw("bad notifyList size")
	}
}
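// For context, the consumer side of this interface lives in the sync
// package. As a hedged sketch (paraphrased, not verbatim from this tree),
// sync.Cond's methods map onto the linknamed functions above roughly as:
//
//	func (c *Cond) Wait() {
//		t := runtime_notifyListAdd(&c.notify) // reserve a ticket, outside the lock
//		c.L.Unlock()
//		runtime_notifyListWait(&c.notify, t) // park until ticket t is notified
//		c.L.Lock()
//	}
//
//	func (c *Cond) Signal()    { runtime_notifyListNotifyOne(&c.notify) }
//	func (c *Cond) Broadcast() { runtime_notifyListNotifyAll(&c.notify) }
//
// notifyListCheck exists because notifyList is mirrored as an opaque struct
// on the sync side; the size comparison catches the two copies drifting apart.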