github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/third_party/gofrontend/libgo/runtime/sema.goc

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Semaphore implementation exposed to Go.
// Intended use is to provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf

package sync
#include "runtime.h"
#include "arch.h"

typedef struct SemaWaiter SemaWaiter;
struct SemaWaiter
{
	uint32 volatile*	addr;
	G*	g;
	int64	releasetime;
	int32	nrelease;	// -1 for acquire
	SemaWaiter*	prev;
	SemaWaiter*	next;
};

typedef struct SemaRoot SemaRoot;
struct SemaRoot
{
	Lock		lock;
	SemaWaiter*	head;
	SemaWaiter*	tail;
	// Number of waiters. Read w/o the lock.
	uint32 volatile	nwait;
};

// Prime to not correlate with any user patterns.
#define SEMTABLESZ 251

struct semtable
{
	SemaRoot root;
	uint8 pad[CacheLineSize-sizeof(SemaRoot)];
};
static struct semtable semtable[SEMTABLESZ];

static SemaRoot*
semroot(uint32 volatile *addr)
{
	return &semtable[((uintptr)addr >> 3) % SEMTABLESZ].root;
}

static void
semqueue(SemaRoot *root, uint32 volatile *addr, SemaWaiter *s)
{
	s->g = runtime_g();
	s->addr = addr;
	s->next = nil;
	s->prev = root->tail;
	if(root->tail)
		root->tail->next = s;
	else
		root->head = s;
	root->tail = s;
}

static void
semdequeue(SemaRoot *root, SemaWaiter *s)
{
	if(s->next)
		s->next->prev = s->prev;
	else
		root->tail = s->prev;
	if(s->prev)
		s->prev->next = s->next;
	else
		root->head = s->next;
	s->prev = nil;
	s->next = nil;
}

static int32
cansemacquire(uint32 volatile *addr)
{
	uint32 v;

	while((v = runtime_atomicload(addr)) > 0)
		if(runtime_cas(addr, v, v-1))
			return 1;
	return 0;
}

void
runtime_semacquire(uint32 volatile *addr, bool profile)
{
	SemaWaiter s;	// Needs to be allocated on stack, otherwise garbage collector could deallocate it
	SemaRoot *root;
	int64 t0;

	// Easy case.
	if(cansemacquire(addr))
		return;

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	root = semroot(addr);
	t0 = 0;
	s.releasetime = 0;
	if(profile && runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		s.releasetime = -1;
	}
	for(;;) {
		runtime_lock(&root->lock);
		// Add ourselves to nwait to disable "easy case" in semrelease.
		runtime_xadd(&root->nwait, 1);
		// Check cansemacquire to avoid missed wakeup.
		if(cansemacquire(addr)) {
			runtime_xadd(&root->nwait, -1);
			runtime_unlock(&root->lock);
			return;
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
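		// After we are woken (or if the wakeup raced ahead of the sleep),
		// re-check cansemacquire below: another acquirer may have taken
		// the count first, in which case we loop and queue again.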
		semqueue(root, addr, &s);
		runtime_parkunlock(&root->lock, "semacquire");
		if(cansemacquire(addr)) {
			if(t0)
				runtime_blockevent(s.releasetime - t0, 3);
			return;
		}
	}
}

void
runtime_semrelease(uint32 volatile *addr)
{
	SemaWaiter *s;
	SemaRoot *root;

	root = semroot(addr);
	runtime_xadd(addr, 1);

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if(runtime_atomicload(&root->nwait) == 0)
		return;

	// Harder case: search for a waiter and wake it.
	runtime_lock(&root->lock);
	if(runtime_atomicload(&root->nwait) == 0) {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		runtime_unlock(&root->lock);
		return;
	}
	for(s = root->head; s; s = s->next) {
		if(s->addr == addr) {
			runtime_xadd(&root->nwait, -1);
			semdequeue(root, s);
			break;
		}
	}
	runtime_unlock(&root->lock);
	if(s) {
		if(s->releasetime)
			s->releasetime = runtime_cputicks();
		runtime_ready(s->g);
	}
}

// TODO(dvyukov): move to netpoll.goc once it's used by all OSes.
void net_runtime_Semacquire(uint32 *addr)
  __asm__ (GOSYM_PREFIX "net.runtime_Semacquire");

void net_runtime_Semacquire(uint32 *addr)
{
	runtime_semacquire(addr, true);
}

void net_runtime_Semrelease(uint32 *addr)
  __asm__ (GOSYM_PREFIX "net.runtime_Semrelease");

void net_runtime_Semrelease(uint32 *addr)
{
	runtime_semrelease(addr);
}

func runtime_Semacquire(addr *uint32) {
	runtime_semacquire(addr, true);
}

func runtime_Semrelease(addr *uint32) {
	runtime_semrelease(addr);
}

typedef struct SyncSema SyncSema;
struct SyncSema
{
	Lock		lock;
	SemaWaiter*	head;
	SemaWaiter*	tail;
};

func runtime_Syncsemcheck(size uintptr) {
	if(size != sizeof(SyncSema)) {
		runtime_printf("bad SyncSema size: sync:%D runtime:%D\n", (int64)size, (int64)sizeof(SyncSema));
		runtime_throw("bad SyncSema size");
	}
}

// Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
func runtime_Syncsemacquire(s *SyncSema) {
	SemaWaiter w, *wake;
	int64 t0;

	w.g = runtime_g();
	w.nrelease = -1;
	w.next = nil;
	w.releasetime = 0;
	t0 = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		w.releasetime = -1;
	}

	runtime_lock(&s->lock);
	if(s->head && s->head->nrelease > 0) {
		// have pending release, consume it
		wake = nil;
		s->head->nrelease--;
		if(s->head->nrelease == 0) {
			wake = s->head;
			s->head = wake->next;
			if(s->head == nil)
				s->tail = nil;
		}
		runtime_unlock(&s->lock);
		if(wake)
			runtime_ready(wake->g);
	} else {
		// enqueue itself
		if(s->tail == nil)
			s->head = &w;
		else
			s->tail->next = &w;
		s->tail = &w;
		runtime_parkunlock(&s->lock, "semacquire");
		if(t0)
			runtime_blockevent(w.releasetime - t0, 2);
	}
}

// Syncsemrelease waits for n pairing Syncsemacquire calls on the same semaphore s.
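// If fewer than n acquirers are currently queued, the releaser parks until
// the remaining acquirers arrive and consume the rest of the release.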
func runtime_Syncsemrelease(s *SyncSema, n uint32) {
	SemaWaiter w, *wake;

	w.g = runtime_g();
	w.nrelease = (int32)n;
	w.next = nil;
	w.releasetime = 0;

	runtime_lock(&s->lock);
	while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
		// have pending acquire, satisfy it
		wake = s->head;
		s->head = wake->next;
		if(s->head == nil)
			s->tail = nil;
		if(wake->releasetime)
			wake->releasetime = runtime_cputicks();
		runtime_ready(wake->g);
		w.nrelease--;
	}
	if(w.nrelease > 0) {
		// enqueue itself
		if(s->tail == nil)
			s->head = &w;
		else
			s->tail->next = &w;
		s->tail = &w;
		runtime_parkunlock(&s->lock, "semarelease");
	} else
		runtime_unlock(&s->lock);
}
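
// Illustrative usage sketch (not part of the runtime itself; sema is an
// arbitrary caller-owned uint32, shown here only for illustration): with a
// semaphore word initialized to 0, code in package sync can sleep with
//
//	runtime_Semacquire(&sema)
//
// and another goroutine can wake it with
//
//	runtime_Semrelease(&sema)
//
// The sleeper returns after exactly one matching release, even if the
// release happens before the acquire (see the header comment at the top
// of this file).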