// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

#include "runtime.h"

// Futex-based implementation of runtime mutexes (Lock) and one-time
// notifications (Note).
//
// This implementation depends on OS-specific implementations of
//
//	runtime_futexsleep(uint32 *addr, uint32 val, int64 ns)
//		Atomically,
//			if(*addr == val) sleep
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	runtime_futexwakeup(uint32 *addr, uint32 cnt)
//		If any procs are sleeping on addr, wake up at most cnt.

enum
{
	// Lock states, stored in l->key.
	MUTEX_UNLOCKED = 0,
	MUTEX_LOCKED = 1,
	MUTEX_SLEEPING = 2,	// presumably at least one thread is parked in futexsleep

	// Spin-wait tuning.
	ACTIVE_SPIN = 4,	// busy-spin rounds before yielding (multiprocessor only)
	ACTIVE_SPIN_CNT = 30,	// procyield iterations per active-spin round
	PASSIVE_SPIN = 1,	// OS-yield rounds before sleeping on the futex
};

// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING.
// MUTEX_SLEEPING means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
void
runtime_lock(Lock *l)
{
	uint32 i, v, wait, spin;

	// Per-M lock count must never go negative; catch bookkeeping bugs.
	if(runtime_m()->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	v = runtime_xchg((uint32*)&l->key, MUTEX_LOCKED);
	if(v == MUTEX_UNLOCKED)
		return;

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait = v;

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(;;) {
		// Try for lock, spinning.
		for(i = 0; i < spin; i++) {
			while(l->key == MUTEX_UNLOCKED)
				// CAS to wait (not plain MUTEX_LOCKED) to
				// preserve the SLEEPING state if we saw it.
				if(runtime_cas((uint32*)&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_procyield(ACTIVE_SPIN_CNT);
		}

		// Try for lock, rescheduling.
		for(i=0; i < PASSIVE_SPIN; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas((uint32*)&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_osyield();
		}

		// Sleep.
		v = runtime_xchg((uint32*)&l->key, MUTEX_SLEEPING);
		if(v == MUTEX_UNLOCKED)
			return;
		// From here on we are (or may be) one of the sleepers, so any
		// future CAS must install MUTEX_SLEEPING, not MUTEX_LOCKED.
		wait = MUTEX_SLEEPING;
		runtime_futexsleep((uint32*)&l->key, MUTEX_SLEEPING, -1);
	}
}

// Unlock l. If the previous state was MUTEX_SLEEPING, a thread may be
// parked on the futex, so wake at most one of them.
void
runtime_unlock(Lock *l)
{
	uint32 v;

	v = runtime_xchg((uint32*)&l->key, MUTEX_UNLOCKED);
	if(v == MUTEX_UNLOCKED)
		runtime_throw("unlock of unlocked lock");
	if(v == MUTEX_SLEEPING)
		runtime_futexwakeup((uint32*)&l->key, 1);

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");
}

// One-time notifications.

// Reset n so that it can be slept on and woken again.
void
runtime_noteclear(Note *n)
{
	n->key = 0;
}

// Wake all current and future sleepers on n. Calling this twice
// without an intervening noteclear is a fatal error.
void
runtime_notewakeup(Note *n)
{
	uint32 old;

	old = runtime_xchg((uint32*)&n->key, 1);
	if(old != 0) {
		runtime_printf("notewakeup - double wakeup (%d)\n", old);
		runtime_throw("notewakeup - double wakeup");
	}
	runtime_futexwakeup((uint32*)&n->key, 1);
}

// Block until n is woken (n->key becomes non-zero). Spurious futex
// wakeups are tolerated by re-checking the key in a loop.
void
runtime_notesleep(Note *n)
{
	M *m = runtime_m();

	/* For gccgo it's OK to sleep in non-g0, and it happens in
	   stoptheworld because we have not implemented preemption.

	if(runtime_g() != runtime_m()->g0)
		runtime_throw("notesleep not on g0");
	*/
	while(runtime_atomicload((uint32*)&n->key) == 0) {
		// m->blocked marks this M as futex-parked for the
		// duration of the sleep.
		m->blocked = true;
		runtime_futexsleep((uint32*)&n->key, 0, -1);
		m->blocked = false;
	}
}

// Sleep on n for at most ns nanoseconds (ns < 0 means forever).
// Returns true if the note was woken, false on timeout.
static bool
notetsleep(Note *n, int64 ns, int64 deadline, int64 now)
{
	M *m = runtime_m();

	// Conceptually, deadline and now are local variables.
	// They are passed as arguments so that the space for them
	// does not count against our nosplit stack sequence.

	if(ns < 0) {
		// No timeout: identical to notesleep.
		while(runtime_atomicload((uint32*)&n->key) == 0) {
			m->blocked = true;
			runtime_futexsleep((uint32*)&n->key, 0, -1);
			m->blocked = false;
		}
		return true;
	}

	if(runtime_atomicload((uint32*)&n->key) != 0)
		return true;

	deadline = runtime_nanotime() + ns;
	for(;;) {
		m->blocked = true;
		runtime_futexsleep((uint32*)&n->key, 0, ns);
		m->blocked = false;
		if(runtime_atomicload((uint32*)&n->key) != 0)
			break;
		// Woken spuriously or by timeout; recompute remaining time.
		now = runtime_nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	// Final load decides woken-vs-timeout in case the wakeup raced
	// with the deadline.
	return runtime_atomicload((uint32*)&n->key) != 0;
}

// Timed note sleep for system goroutines (g0), or any g during GC.
bool
runtime_notetsleep(Note *n, int64 ns)
{
	bool res;

	if(runtime_g() != runtime_m()->g0 && !runtime_m()->gcing)
		runtime_throw("notetsleep not on g0");

	res = notetsleep(n, ns, 0, 0);
	return res;
}

// same as runtime_notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
bool
runtime_notetsleepg(Note *n, int64 ns)
{
	bool res;

	if(runtime_g() == runtime_m()->g0)
		runtime_throw("notetsleepg on g0");

	// Enter syscall state so the scheduler can use this M's P while
	// we are blocked.
	runtime_entersyscallblock();
	res = notetsleep(n, ns, 0, 0);
	runtime_exitsyscall();
	return res;
}