// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin netbsd openbsd plan9 windows

#include "runtime.h"

// Semaphore-based Lock and Note implementation, used on OSes that
// provide a per-M sleep/wakeup semaphore rather than futexes.
//
// This implementation depends on OS-specific implementations of
//
//	uintptr runtime·semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime·semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime·semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//

enum
{
	// l->key encodes the lock state in one word:
	// bit 0 (LOCKED) is the held flag; the remaining bits are the
	// head of a linked list of waiting M's (chained via m->nextwaitm).
	LOCKED = 1,

	// Spin tuning: ACTIVE_SPIN rounds of ACTIVE_SPIN_CNT procyield
	// iterations, then PASSIVE_SPIN osyield calls, before sleeping.
	ACTIVE_SPIN = 4,
	ACTIVE_SPIN_CNT = 30,
	PASSIVE_SPIN = 1,
};

// runtime·lock acquires l.  Fast path is a single CAS of l->key from
// nil to LOCKED.  On contention it spins briefly (multiprocessors only),
// then pushes this M onto the wait list in l->key and parks on the
// per-M semaphore until runtime·unlock wakes it.
void
runtime·lock(Lock *l)
{
	uintptr v;
	uint32 i, spin;

	// m->locks counts held runtime locks; going negative means an
	// unbalanced lock/unlock pair corrupted the count.
	if(m->locks++ < 0)
		runtime·throw("runtime·lock: lock count");

	// Speculative grab for lock.
	if(runtime·casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	// Lazily create this M's semaphore before we may need to sleep.
	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();

	// On uniprocessor's, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime·ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime·atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
unlocked:
			// Lock looks free: try to set LOCKED while preserving
			// any wait list already recorded in v.
			if(runtime·casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;	// CAS lost a race; restart the spin budget.
		}
		if(i<spin)
			runtime·procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime·osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime·casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime·atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;	// lock freed while queueing; try to take it.
			}
			if(v&LOCKED) {
				// Queued. Wait.
				runtime·semasleep(-1);
				i = 0;	// woken; resume with a fresh spin budget.
			}
		}
	}
}

// runtime·unlock releases l.  If no M's are queued (key == LOCKED),
// a single CAS back to nil suffices; otherwise dequeue the head waiter
// and wake it via its semaphore.
void
runtime·unlock(Lock *l)
{
	uintptr v;
	M *mp;

	if(--m->locks < 0)
		runtime·throw("runtime·unlock: lock count");

	for(;;) {
		v = (uintptr)runtime·atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			// Held with no waiters: clear the word.
			if(runtime·casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			// Note: the new key is mp->nextwaitm with LOCKED cleared,
			// handing the lock to no one; the woken M re-contends.
			if(runtime·casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M. Wake it.
				runtime·semawakeup(mp);
				break;
			}
		}
	}
}

// One-time notifications.
//
// n->key holds one of: 0 (cleared), the waiting M's pointer
// (an M is parked in notesleep/notetsleep), or LOCKED (wakeup done).

// runtime·noteclear resets n so it can be slept on and woken once.
void
runtime·noteclear(Note *n)
{
	n->key = 0;
}

// runtime·notewakeup marks n as fired and, if an M was registered,
// wakes it.  Calling it twice on the same Note without an intervening
// noteclear is a fatal error.
void
runtime·notewakeup(Note *n)
{
	M *mp;

	// Atomically swap in LOCKED, capturing the previous state.
	do
		mp = runtime·atomicloadp((void**)&n->key);
	while(!runtime·casp((void**)&n->key, mp, (void*)LOCKED));

	// Successfully set waitm to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting. Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups! Not allowed.
		runtime·throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m. Wake it up.
		runtime·semawakeup(mp);
	}
}

// runtime·notesleep blocks until notewakeup(n) has been called.
// If the wakeup already happened, it returns immediately.
void
runtime·notesleep(Note *n)
{
	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();
	// Register this M as the waiter.
	if(!runtime·casp((void**)&n->key, nil, m)) {	// must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime·throw("notesleep - waitm out of sync");
		return;
	}
	// Queued. Sleep.
	// NOTE(review): setprof is toggled off around the park — presumably
	// to suppress profiling signals while idle; confirm against setprof.
	if(m->profilehz > 0)
		runtime·setprof(false);
	runtime·semasleep(-1);
	if(m->profilehz > 0)
		runtime·setprof(true);
}

// runtime·notetsleep is notesleep with a timeout of ns nanoseconds
// (ns < 0 means wait forever).  On timeout it must unregister itself
// from n->key — or, if a wakeup raced in, consume the semaphore grant —
// so the Note's state stays consistent for a later noteclear.
void
runtime·notetsleep(Note *n, int64 ns)
{
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime·notesleep(n);
		return;
	}

	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();

	// Register for wakeup on n->waitm.
	if(!runtime·casp((void**)&n->key, nil, m)) {	// must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime·throw("notetsleep - waitm out of sync");
		return;
	}

	// NOTE(review): profiling suppressed while parked, as in notesleep.
	if(m->profilehz > 0)
		runtime·setprof(false);
	deadline = runtime·nanotime() + ns;
	for(;;) {
		// Registered. Sleep.
		if(runtime·semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			if(m->profilehz > 0)
				runtime·setprof(true);
			return;
		}

		// Interrupted or timed out. Still registered. Semaphore not acquired.
		now = runtime·nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived. Keep sleeping.
		ns = deadline - now;
	}

	if(m->profilehz > 0)
		runtime·setprof(true);

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime·atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime·casp((void**)&n->key, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime·semasleep(-1) < 0)
				runtime·throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime·throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}