github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/runtime/lock_futex.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.
const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
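//
// For orientation, these are the only transitions performed by the code
// below (summarizing lock and unlock; nothing here is new behavior):
//
//	any state      -> mutex_locked     speculative xchg at the top of lock
//	                                   (wait records the previous value)
//	mutex_unlocked -> wait             cas while spinning in lock, where wait
//	                                   is mutex_locked or mutex_sleeping
//	any state      -> mutex_sleeping   xchg in lock just before futexsleep
//	any state      -> mutex_unlocked   xchg in unlock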

// We use the uintptr mutex.key and note.key as a uint32.
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either mutex_locked or mutex_sleeping
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l.key from
	// mutex_sleeping to some other value, we must be
	// careful to change it back to mutex_sleeping before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, there is no point in spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
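
// To see the same three-state, spin-then-sleep idea outside the runtime,
// here is a minimal user-space sketch. It is an analogy only, not the
// runtime's code: a capacity-1 channel stands in for futexsleep/futexwakeup,
// runtime.Gosched for procyield/osyield, and the names (futexishMutex and
// friends) are invented for the example.
//
//	package main
//
//	import (
//		"runtime"
//		"sync/atomic"
//	)
//
//	const (
//		unlocked = 0 // no holder
//		locked   = 1 // held, no sleepers
//		sleeping = 2 // held, and there may be sleepers
//	)
//
//	type futexishMutex struct {
//		state uint32
//		wake  chan struct{} // at most one pending wake token
//	}
//
//	func newFutexishMutex() *futexishMutex {
//		return &futexishMutex{wake: make(chan struct{}, 1)}
//	}
//
//	func (m *futexishMutex) lock() {
//		// Speculative grab, as in lock above.
//		if atomic.CompareAndSwapUint32(&m.state, unlocked, locked) {
//			return
//		}
//		for {
//			// Brief spinning phase.
//			for i := 0; i < 4; i++ {
//				if atomic.CompareAndSwapUint32(&m.state, unlocked, sleeping) {
//					return
//				}
//				runtime.Gosched()
//			}
//			// Announce a possible sleeper, then sleep unless the
//			// mutex just became free.
//			if atomic.SwapUint32(&m.state, sleeping) == unlocked {
//				return
//			}
//			<-m.wake // may be woken spuriously; the loop re-checks
//		}
//	}
//
//	func (m *futexishMutex) unlock() {
//		if atomic.SwapUint32(&m.state, unlocked) == sleeping {
//			select {
//			case m.wake <- struct{}{}: // wake (at most) one sleeper
//			default: // a wake token is already pending
//			}
//		}
//	}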

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, -1)
		gp.m.blocked = false
	}
}
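
// A minimal sketch of the intended calling pattern for notes (illustration
// only; the variable name is invented):
//
//	var done note
//	noteclear(&done)   // arm the notification
//
//	// On the waiting M (notesleep must run on g0):
//	notesleep(&done)   // returns once notewakeup(&done) has run
//
//	// On the signalling M, at most once per noteclear:
//	notewakeup(&done)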

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, -1)
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}
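
// The timed branch above converts ns into an absolute deadline and, after
// every possibly spurious wakeup, re-derives the remaining timeout before
// sleeping again. The same pattern at user level, assuming hypothetical
// ready() and waitAtMost(d time.Duration) helpers that may return early,
// looks like:
//
//	deadline := time.Now().Add(timeout)
//	for !ready() {
//		remaining := deadline.Sub(time.Now())
//		if remaining <= 0 {
//			break
//		}
//		waitAtMost(remaining) // may return early; the loop re-checks
//	}
//	return ready()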

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// notetsleepg is the same as notetsleep, but is called on a user g (not g0).
// It calls only nosplit functions between entersyscallblock and exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock(0)
	ok := notetsleep_internal(n, ns)
	exitsyscall(0)
	return ok
}