github.com/hlts2/go@v0.0.0-20170904000733-812b34efaed8/src/runtime/lock_sema.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
//
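// For illustration only, a minimal sketch of how a port might satisfy this
// contract on top of an OS counting semaphore (field and helper names here
// are hypothetical; the real implementations live in the per-OS files of
// this package):
//
//	func semacreate(mp *m) {
//		if mp.waitsema == 0 {
//			mp.waitsema = osSemaphoreNew(0) // hypothetical OS helper
//		}
//	}
//
//	func semawakeup(mp *m) {
//		osSemaphorePost(mp.waitsema) // hypothetical OS helper
//	}
//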
const (
	locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
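
// The mutex key in l.key encodes both the lock state and the wait queue:
// the low bit is the locked flag, and the remaining bits, when nonzero,
// hold a *m pointer to the head of the list of waiting Ms (chained through
// m.nextwaitm). Pointer alignment of m guarantees that the low bit of a
// valid *m is zero, so the two never collide.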

func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = v &^ locked
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}
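
// A typical (illustrative) use of this mutex elsewhere in the runtime:
//
//	lock(&sched.lock)
//	// ... mutate scheduler state ...
//	unlock(&sched.lock)
//
// The lock is not reentrant: locking the same mutex twice on the same M
// deadlocks. m.locks counts held runtime locks so that preemption can be
// suppressed while any are held (see the end of unlock).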

//go:nowritebarrier
// We might not be holding a p in this code.
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == locked {
			if atomic.Casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (*m)(unsafe.Pointer(v &^ locked))
			if atomic.Casuintptr(&l.key, v, mp.nextwaitm) {
				// Dequeued an M.  Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}
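
// The note key moves through three states: 0 (cleared), a waiting *m
// (a sleeper has registered itself), and locked (a wakeup has been
// delivered). notewakeup always moves the key to locked; the switch above
// decides what the old value requires.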

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}
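
// A typical (illustrative) one-time notification pattern:
//
//	var n note
//	noteclear(&n)
//
//	// waiter, on g0:
//	notesleep(&n)
//
//	// waker, on another M:
//	notewakeup(&n)
//
// notesleep returns once notewakeup has run, whether the wakeup happens
// before or after the sleeper registers.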

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}
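
// notetsleep_internal reports whether the note was signaled: true if a
// notewakeup arrived (or ns < 0 and we slept until one did), false if the
// deadline passed first. On timeout the waiter unregisters itself, so a
// later notewakeup finds key == 0 and is a no-op.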

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}
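
// Illustrative timed wait (the 100ms duration is arbitrary):
//
//	if !notetsleep(&n, 100*1000*1000) { // returns false on timeout
//		// handle timeout
//	}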

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock(0)
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall(0)
	return ok
}