github.com/ltltlt/go-source-code@v0.0.0-20190830023027-95be009773aa/runtime/lock_futex.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2 // very likely at least one thread is sleeping (blocked in futex)

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

// If lock blocks, the g, the p and the m are all blocked
// (because the wait is a system call).
func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	// Try to switch key to the locked state; if the old state was
	// unlocked, this thread now owns the lock and can return immediately.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning a bounded number of times.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			// procyield loops active_spin_cnt times, executing a
			// PAUSE instruction on each iteration.
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		// Ask the OS scheduler to reschedule us a bounded number of times.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		// The kernel only blocks us while the state is still mutex_sleeping.
		futexsleep(key32(&l.key), mutex_sleeping, -1) // may be woken spuriously, hence the enclosing loop
	}
}

func unlock(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	// Only wake a thread if the old state was mutex_sleeping.
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
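// The three-state protocol above is easiest to see outside the runtime.
// Below is a minimal user-space sketch of the same lock/unlock algorithm,
// assuming Linux and the raw FUTEX_WAIT/FUTEX_WAKE operations reached
// through syscall.SYS_FUTEX. The futexMutex type, its methods and the
// fixed spin count are hypothetical stand-ins for the runtime's mutex,
// procyield/osyield and active_spin; none of them are runtime API.
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//		"sync/atomic"
//		"syscall"
//		"unsafe"
//	)
//
//	const (
//		mutexUnlocked = 0
//		mutexLocked   = 1
//		mutexSleeping = 2 // probably at least one thread asleep in futex
//
//		futexWait = 0 // FUTEX_WAIT
//		futexWake = 1 // FUTEX_WAKE
//	)
//
//	// futexsleep blocks while *addr == val; spurious wakeups are allowed.
//	func futexsleep(addr *uint32, val uint32) {
//		syscall.Syscall6(syscall.SYS_FUTEX,
//			uintptr(unsafe.Pointer(addr)), futexWait, uintptr(val), 0, 0, 0)
//	}
//
//	// futexwakeup wakes at most cnt threads sleeping on addr.
//	func futexwakeup(addr *uint32, cnt uint32) {
//		syscall.Syscall6(syscall.SYS_FUTEX,
//			uintptr(unsafe.Pointer(addr)), futexWake, uintptr(cnt), 0, 0, 0)
//	}
//
//	type futexMutex struct{ key uint32 }
//
//	func (m *futexMutex) lock() {
//		// Speculative grab, as in runtime.lock.
//		v := atomic.SwapUint32(&m.key, mutexLocked)
//		if v == mutexUnlocked {
//			return
//		}
//		// Once mutexSleeping has been observed it must be written back
//		// on every acquisition, so a later unlock still wakes sleepers.
//		wait := v
//		for {
//			// Brief spin, standing in for procyield/osyield.
//			for i := 0; i < 4; i++ {
//				if atomic.LoadUint32(&m.key) == mutexUnlocked &&
//					atomic.CompareAndSwapUint32(&m.key, mutexUnlocked, wait) {
//					return
//				}
//			}
//			// Mark sleeping and block in the kernel; a spurious
//			// wakeup just goes around the loop again.
//			v = atomic.SwapUint32(&m.key, mutexSleeping)
//			if v == mutexUnlocked {
//				return
//			}
//			wait = mutexSleeping
//			futexsleep(&m.key, mutexSleeping)
//		}
//	}
//
//	func (m *futexMutex) unlock() {
//		v := atomic.SwapUint32(&m.key, mutexUnlocked)
//		if v == mutexUnlocked {
//			panic("unlock of unlocked lock")
//		}
//		// Only enter the kernel if someone may actually be asleep.
//		if v == mutexSleeping {
//			futexwakeup(&m.key, 1)
//		}
//	}
//
//	func main() {
//		var m futexMutex
//		var wg sync.WaitGroup
//		n := 0
//		for i := 0; i < 8; i++ {
//			wg.Add(1)
//			go func() {
//				defer wg.Done()
//				for j := 0; j < 1000; j++ {
//					m.lock()
//					n++
//					m.unlock()
//				}
//			}()
//		}
//		wg.Wait()
//		fmt.Println(n) // 8000 if the lock excludes properly
//	}
//
// The design keeps the uncontended path entirely in user space: the kernel
// is entered only when a thread actually has to sleep or be woken.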
// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock(0)
	ok := notetsleep_internal(n, ns)
	exitsyscall(0)
	return ok
}
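// A companion sketch for the notes above: the same one-shot semantics can
// be mimicked in user space with the two futex operations, again assuming
// Linux and syscall.SYS_FUTEX. The note type here is hypothetical and
// deliberately omits the runtime's g0 checks, m.blocked bookkeeping and
// cgo_yield polling.
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//		"syscall"
//		"time"
//		"unsafe"
//	)
//
//	type note struct{ key uint32 }
//
//	func (n *note) clear() { atomic.StoreUint32(&n.key, 0) }
//
//	// wakeup sets the key to 1 and wakes one sleeper, like notewakeup.
//	func (n *note) wakeup() {
//		if atomic.SwapUint32(&n.key, 1) != 0 {
//			panic("double wakeup")
//		}
//		syscall.Syscall6(syscall.SYS_FUTEX,
//			uintptr(unsafe.Pointer(&n.key)), 1 /* FUTEX_WAKE */, 1, 0, 0, 0)
//	}
//
//	// sleep blocks until wakeup has run, like notesleep with ns < 0.
//	// FUTEX_WAIT returns at once if *key != 0, and a spurious wakeup
//	// simply re-checks the key, so the loop is all that is needed.
//	func (n *note) sleep() {
//		for atomic.LoadUint32(&n.key) == 0 {
//			syscall.Syscall6(syscall.SYS_FUTEX,
//				uintptr(unsafe.Pointer(&n.key)), 0 /* FUTEX_WAIT */, 0, 0, 0, 0)
//		}
//	}
//
//	func main() {
//		var n note
//		n.clear()
//		go func() {
//			time.Sleep(50 * time.Millisecond)
//			n.wakeup()
//		}()
//		n.sleep() // blocks until the goroutine calls wakeup
//		fmt.Println("woken")
//	}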