github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/runtime/lock_sema.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
//
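// As a hedged sketch of that contract (not code from any of the
// OS-specific files; osSemCreate, osSemWait, and osSemPost are
// hypothetical names), an implementation backed by a counting
// semaphore might look like:
//
//	func semacreate(mp *m) {
//		if mp.waitsema != 0 {
//			return // already have one
//		}
//		mp.waitsema = osSemCreate() // one OS semaphore per M
//	}
//
//	func semasleep(ns int64) int32 {
//		if osSemWait(getg().m.waitsema, ns) { // block, honoring the ns timeout
//			return 0 // acquired
//		}
//		return -1 // interrupted or timed out
//	}
//
//	func semawakeup(mp *m) {
//		osSemPost(mp.waitsema) // one post balances one successful wait
//	}
//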
const (
	locked uintptr = 1

	active_spin     = 4  // rounds of active spinning (procyield) before yielding
	active_spin_cnt = 30 // iterations per procyield call
	passive_spin    = 1  // rounds of passive yielding (osyield) before sleeping
)

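// The lock word l.key encodes the whole state in one uintptr: the low
// bit is the locked flag, and the remaining bits are the head of the
// list of Ms blocked on this lock, chained through m.nextwaitm. (An M
// is pointer-aligned, so the low bit of its address is always free.)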
func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++ // disable preemption of this M while it holds (or waits for) the lock

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessors, there is no point in spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l.key points to a linked list of Ms waiting
			// for this lock, chained through m.nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = v &^ locked
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

// unlock releases l. If other Ms are queued on the lock word, it
// dequeues the most recently queued M and wakes it.
// We might not be holding a p in this code.
//go:nowritebarrier
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == locked {
			if atomic.Casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other Ms are waiting for the lock.
			// Dequeue an M.
			mp = (*m)(unsafe.Pointer(v &^ locked))
			if atomic.Casuintptr(&l.key, v, mp.nextwaitm) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
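// A note's key field holds one of three states: 0 (cleared, nothing
// has happened yet), the address of a sleeping M (a waiter is
// registered), or locked (the wakeup has been posted).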
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set n.key to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	semasleep(-1)
	gp.m.blocked = false
}

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n.key.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		semasleep(-1)
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened, so the semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock(0)
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall(0)
	return ok
}
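
// The helper below is a hypothetical illustration, not part of the
// real runtime: it shows the intended calling pattern for a note used
// as a one-shot timed event from a user goroutine. waitEventSketch is
// an invented name; only noteclear and notetsleepg are real.
func waitEventSketch(n *note, ns int64) bool {
	noteclear(n) // reset to the cleared state before anyone can post a wakeup
	// ... publish n to some other M, which will call notewakeup(n) ...
	return notetsleepg(n, ns) // park for at most ns nanoseconds; true if woken
}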