rsc.io/go@v0.0.0-20150416155037-e040fd465409/src/runtime/lock_futex.go (about)

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build dragonfly freebsd linux
     6  
     7  package runtime
     8  
     9  import "unsafe"
    10  
    11  // This implementation depends on OS-specific implementations of
    12  //
    13  //	runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
    14  //		Atomically,
    15  //			if(*addr == val) sleep
    16  //		Might be woken up spuriously; that's allowed.
    17  //		Don't sleep longer than ns; ns < 0 means forever.
    18  //
    19  //	runtime·futexwakeup(uint32 *addr, uint32 cnt)
    20  //		If any procs are sleeping on addr, wake up at most cnt.
    21  
const (
	// States of the 32-bit futex word in mutex.key.
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2 // locked, and presumably at least one thread asleep in futexsleep

	// Contention strategy: spin actively, then passively, then sleep.
	active_spin     = 4  // rounds of busy spinning (multiprocessor only)
	active_spin_cnt = 30 // procyield count per active round
	passive_spin    = 1  // rounds of osyield before sleeping in the kernel
)
    31  
    32  // Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
    33  // mutex_sleeping means that there is presumably at least one sleeping thread.
    34  // Note that there can be spinning threads during all states - they do not
    35  // affect mutex's state.
    36  
// We use the uintptr mutex.key and note.key as a uint32.
// key32 reinterprets the first four bytes of *p as the 32-bit futex
// word required by futexsleep/futexwakeup. All accesses to these keys
// in this file go through key32 (or compare the whole uintptr against
// zero), so the view is used consistently.
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}
    41  
// lock acquires mutex l for the current M.
// Fast path: a single atomic exchange. Under contention it spins
// actively (procyield), then passively (osyield), and finally parks
// the thread in the kernel via futexsleep with the key set to
// mutex_sleeping. Increments gp.m.locks while held.
func lock(l *mutex) {
	gp := getg()

	// m.locks counts runtime locks held by this M; negative is corruption.
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex.  If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				// CAS in wait (not plain mutex_locked) so a recorded
				// sleeper state is preserved — see comment on wait above.
				if cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			// Won the lock; the key is now mutex_sleeping even though we
			// never slept, which at worst causes a spurious futexwakeup
			// at unlock — spurious wakeups are explicitly allowed.
			return
		}
		// Once we sleep on the key we must keep writing mutex_sleeping
		// on later CAS attempts, so unlock knows to wake someone.
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}
   101  
// unlock releases mutex l. If the key recorded a sleeper
// (mutex_sleeping), one waiting thread is woken via futexwakeup.
// Decrements gp.m.locks; throws on unlocking an unlocked mutex.
func unlock(l *mutex) {
	// Release first, then wake: the woken thread re-CASes for the lock.
	v := xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		// At least one thread slept in lock; wake exactly one so it can
		// retry. (The wakeup may be spurious — that is allowed.)
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
   120  
   121  // One-time notifications.
// noteclear prepares note n for use: key 0 means "not signaled yet",
// so a subsequent notesleep will block until notewakeup stores 1.
func noteclear(n *note) {
	n.key = 0
}
   125  
// notewakeup signals note n exactly once, releasing any sleeper.
// A second wakeup without an intervening noteclear is fatal.
func notewakeup(n *note) {
	// Publish the signal (key = 1) before waking, so a sleeper that
	// wakes — even spuriously — observes key != 0 and stays awake.
	old := xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}
   134  
// notesleep blocks the current M until note n is signaled by
// notewakeup. Must run on g0; throws otherwise.
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	// Re-check the key after every wakeup: futexsleep may return
	// spuriously (allowed per the contract at the top of this file).
	for atomicload(key32(&n.key)) == 0 {
		// m.blocked flags this M as parked while in futexsleep.
		// NOTE(review): the consumer of m.blocked is outside this file.
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, -1)
		gp.m.blocked = false
	}
}
   146  
// notetsleep_internal waits for note n with a timeout of ns
// nanoseconds; ns < 0 means wait forever. Reports whether the note was
// signaled (true) rather than the deadline expiring (false).
// nosplit because notetsleepg calls it between entersyscallblock and
// exitsyscall, where only nosplit functions may run.
//go:nosplit
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		// No deadline: same loop as notesleep.
		for atomicload(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, -1)
			gp.m.blocked = false
		}
		return true
	}

	// Already signaled: skip the sleep entirely.
	if atomicload(key32(&n.key)) != 0 {
		return true
	}

	// Sleep, re-checking the key after each (possibly early or
	// spurious) wakeup and re-arming with the remaining time.
	deadline := nanotime() + ns
	for {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		gp.m.blocked = false
		if atomicload(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	// Final check: the note may have been signaled right at the deadline.
	return atomicload(key32(&n.key)) != 0
}
   180  
   181  func notetsleep(n *note, ns int64) bool {
   182  	gp := getg()
   183  	if gp != gp.m.g0 && gp.m.preemptoff != "" {
   184  		throw("notetsleep not on g0")
   185  	}
   186  
   187  	return notetsleep_internal(n, ns)
   188  }
   189  
   190  // same as runtime·notetsleep, but called on user g (not g0)
   191  // calls only nosplit functions between entersyscallblock/exitsyscall
   192  func notetsleepg(n *note, ns int64) bool {
   193  	gp := getg()
   194  	if gp == gp.m.g0 {
   195  		throw("notetsleepg on g0")
   196  	}
   197  
   198  	entersyscallblock(0)
   199  	ok := notetsleep_internal(n, ns)
   200  	exitsyscall(0)
   201  	return ok
   202  }