github.com/m10x/go/src@v0.0.0-20220112094212-ba61592315da/runtime/lock_futex.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build dragonfly || freebsd || linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.
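//
// As an illustration only (not part of this file), the Linux port satisfies
// this contract in os_linux.go by wrapping the futex system call. Roughly,
// and assuming the futex syscall wrapper and timespec helpers that the Linux
// port defines:
//
//	func futexsleep(addr *uint32, val uint32, ns int64) {
//		if ns < 0 {
//			futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, nil, nil, 0)
//			return
//		}
//		var ts timespec
//		ts.setNsec(ns)
//		futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(&ts), nil, 0)
//	}
//
//	func futexwakeup(addr *uint32, cnt uint32) {
//		futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
//	}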

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
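
// The spin parameters above mean that lock2 first makes active_spin rounds
// of procyield(active_spin_cnt) - brief user-space busy waits (PAUSE
// instructions on x86) - then passive_spin rounds of osyield (a
// sched_yield-style call into the OS), and only then parks the thread in
// futexsleep.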

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads in all states; they do not
// affect the mutex's state.
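//
// An illustrative sequence of transitions, as driven by the code below:
//
//	lock2, fast path:  unlocked -> locked   (Xchg)
//	lock2, giving up:  locked   -> sleeping (Xchg), then futexsleep
//	unlock2:           sleeping -> unlocked (Xchg), then futexwakeup(1)
//	woken waiter:      unlocked -> sleeping (Cas with wait == mutex_sleeping)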

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either mutex_locked or mutex_sleeping
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l.key from
	// mutex_sleeping to some other value, we must be
	// careful to change it back to mutex_sleeping before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
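
// Typical use inside the runtime pairs the two calls around a critical
// section; for example (illustrative only, sched.lock is defined elsewhere):
//
//	lock(&sched.lock)
//	// ... read or update scheduler state ...
//	unlock(&sched.lock)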

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval (10ms) to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}
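
// Illustrative note usage (not part of this file); the pattern used by the
// runtime's own callers is:
//
//	var n note
//	noteclear(&n)
//	// hand &n to another thread, which will eventually call notewakeup(&n)
//	notesleep(&n) // blocks this M (on g0) until the wakeup arrives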

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval (10ms) to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

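// notetsleep waits for a wakeup on n for at most ns nanoseconds (ns < 0 means
// wait forever). It reports whether the note was signaled rather than the
// timeout expiring.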
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0).
// Calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}

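// beforeIdle and checkTimeouts are no-ops in the futex-based implementation;
// they exist so that all lock implementations expose the same hooks (the
// js/wasm port, for example, uses them to schedule timer and event callbacks).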
func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}