github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/lock_sema.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
//
const (
	locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
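
// The low bit of l.key is the lock bit; the remaining bits hold the head
// of the list of Ms waiting for the lock (see lock2 and unlock2 below):
//
//	l.key == 0                        unlocked
//	l.key == locked                   held, no waiters
//	l.key == uintptr(mp) | locked     held, mp heads the wait list,
//	                                  linked through m.nextwaitm
//
// OSes with futexes (e.g. Linux) use the futex-based variant in
// lock_futex.go instead of building on these per-M semaphores.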

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
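	// Each iteration below first tries to grab the lock if it looks free.
	// On failure it backs off in stages: the first spin iterations
	// busy-wait with procyield, the next passive_spin iterations call
	// osyield, and after that the M queues itself on l.key and parks on
	// its semaphore.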
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = muintptr(v &^ locked)
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

//go:nowritebarrier
// We might not be holding a p in this code.
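// That is why write barriers are forbidden here (go:nowritebarrier): the
// write barrier buffer lives on the P.
// Waiters are woken in LIFO order: lock2 pushes the current M onto the
// head of the wait list, and unlock2 pops and wakes the head.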
func unlock2(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == locked {
			if atomic.Casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = muintptr(v &^ locked).ptr()
			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
				// Dequeued an M.  Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
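// A note is a one-shot event; n.key holds one of three values:
//
//	0            cleared; no wakeup sent yet
//	M pointer    an M is parked in notesleep/notetsleep waiting for the wakeup
//	locked       notewakeup has been called
//
// A minimal usage sketch (hypothetical caller, not code from this file):
//
//	var n note
//	noteclear(&n)
//	// ... hand &n to another M, which eventually calls notewakeup(&n) ...
//	notesleep(&n) // returns once the wakeup has been delivered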
func noteclear(n *note) {
	if GOOS == "aix" {
		// On AIX, semaphores might not synchronize the memory in some
		// rare cases. See issue #30189.
		atomic.Storeuintptr(&n.key, 0)
	} else {
		n.key = 0
	}
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
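		// 10e6 ns = 10ms: wake periodically to run the cgo yield hook,
		// then re-check whether the note has been signaled.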
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
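		// If a cgo yield hook is installed, cap each sleep at 10ms so the
		// hook still gets called regularly while waiting for the deadline.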
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}

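// beforeIdle and checkTimeouts are scheduler hooks; they only do real work
// in the js/wasm port (lock_js.go). On the semaphore-based platforms in this
// file they are no-ops.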
func beforeIdle(int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}