github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/lockrank_on.go

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.staticlockranking

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// worldIsStopped is accessed atomically to track world-stops. 1 == world
// stopped.
var worldIsStopped atomic.Uint32

// lockRankStruct is embedded in mutex
type lockRankStruct struct {
	// static lock ranking of the lock
	rank lockRank
	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
	// 32-bit systems.
	pad int
}

// lockInit(l *mutex, rank lockRank) sets the rank of a lock before it is used.
// If there is no clear place to initialize a lock, then the rank of a lock can be
// specified during the lock call itself via lockWithRank(l *mutex, rank lockRank).
func lockInit(l *mutex, rank lockRank) {
	l.rank = rank
}

func getLockRank(l *mutex) lockRank {
	return l.rank
}
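
// Illustration (added commentary, not part of the upstream file): ranks are
// normally assigned once, at lock creation, and then flow through every
// acquire; lock(l) itself is a thin wrapper that calls
// lockWithRank(l, getLockRank(l)). A sketch, using a hypothetical lock
// variable and the real lockRankHchan rank:
//
//	var chanLock mutex
//	lockInit(&chanLock, lockRankHchan) // rank set before first use
//	lock(&chanLock)                    // records the rank via lockWithRank
//	unlock(&chanLock)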

// lockWithRank is like lock(l), but allows the caller to specify a lock rank
// when acquiring a non-static lock.
//
// Note that we need to be careful about stack splits:
//
// This function is not nosplit, thus it may split at function entry. This may
// introduce a new edge in the lock order, but it is no different from any
// other (non-nosplit) call before this call (including the call to lock()
// itself).
//
// However, we switch to the systemstack to record the lock held to ensure that
// we record an accurate lock ordering. For example, without systemstack, a
// stack split on entry to lock2() would record stack split locks as taken
// after l, even though l is not actually locked yet.
func lockWithRank(l *mutex, rank lockRank) {
	if l == &debuglock || l == &paniclk {
		// debuglock is only used for println/printlock(). Don't do lock
		// rank recording for it, since print/println are used when
		// printing out a lock ordering problem below.
		//
		// paniclk is only used for fatal throw/panic. Don't do lock
		// rank recording for it, since we throw after reporting a
		// lock ordering problem. Additionally, paniclk may be taken
		// after effectively any lock (anywhere we might panic), which
		// the partial order doesn't cover.
		lock2(l)
		return
	}
	if rank == 0 {
		rank = lockRankLeafRank
	}
	gp := getg()
	// Log the new class.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
		lock2(l)
	})
}
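
// Illustration (added commentary, not part of the upstream file): the check
// above means locks must be taken in non-decreasing rank order, subject to
// lockPartialOrder. Sketch with hypothetical locks a and b whose ranks
// satisfy rankA < rankB:
//
//	lockWithRank(&a, rankA)
//	lockWithRank(&b, rankB) // OK, if rankA is listed in lockPartialOrder[rankB]
//	...
//	lockWithRank(&b, rankB)
//	lockWithRank(&a, rankA) // rank decreased: checkRanks throws "lock ordering problem"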

// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func printHeldLocks(gp *g) {
	if gp.m.locksHeldLen == 0 {
		println("<none>")
		return
	}

	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(held.lockAddr))
	}
}

// acquireLockRank acquires a rank which is not associated with a mutex lock.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func acquireLockRank(rank lockRank) {
	gp := getg()
	// Log the new class. See comment on lockWithRank.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = 0
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
	})
}
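
// Illustration (added commentary, not part of the upstream file):
// acquireLockRank models lock-like resources that have no mutex; the runtime
// uses it, for example, for the Gscan status bit (lockRankGscan). A sketch of
// the pairing:
//
//	acquireLockRank(lockRankGscan)
//	// ... work guarded by the ranked resource ...
//	releaseLockRank(lockRankGscan)
//
// Because lockAddr is recorded as 0, releaseLockRank matches the held-locks
// entry by rank alone.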

// checkRanks checks if goroutine gp, which has most recently acquired a lock
// with rank 'prevRank', can now acquire a lock with rank 'rank'.
//
//go:systemstack
func checkRanks(gp *g, prevRank, rank lockRank) {
	rankOK := false
	if rank < prevRank {
		// If rank < prevRank, then we definitely have a rank error
		rankOK = false
	} else if rank == lockRankLeafRank {
		// If new lock is a leaf lock, then the preceding lock can
		// be anything except another leaf lock.
		rankOK = prevRank < lockRankLeafRank
	} else {
		// The new rank passed the total-order check above, but we
		// additionally enforce the partial ordering specified by
		// lockPartialOrder. Two locks with the same rank can only
		// be acquired at the same time if explicitly listed in the
		// lockPartialOrder table.
		list := lockPartialOrder[rank]
		for _, entry := range list {
			if entry == prevRank {
				rankOK = true
				break
			}
		}
	}
	if !rankOK {
		printlock()
		println(gp.m.procid, " ======")
		printHeldLocks(gp)
		throw("lock ordering problem")
	}
}
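
// Illustration (added commentary, not part of the upstream file):
// lockPartialOrder[rank] lists every rank that may legally be held
// immediately before a lock of rank 'rank' is acquired. Given a hypothetical
// table entry
//
//	rankB: {rankA},
//
// checkRanks(gp, rankA, rankB) passes, while checkRanks(gp, rankC, rankB)
// throws "lock ordering problem" even when rankC <= rankB, because rankC is
// not listed as an allowed predecessor of rankB.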

// See comment on lockWithRank regarding stack splitting.
func unlockWithRank(l *mutex) {
	if l == &debuglock || l == &paniclk {
		// See comment at beginning of lockWithRank.
		unlock2(l)
		return
	}
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", l.rank.String(), l.rank, l)
			throw("unlock without matching lock acquire")
		}
		unlock2(l)
	})
}

// releaseLockRank releases a rank which is not associated with a mutex lock.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func releaseLockRank(rank lockRank) {
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", rank.String(), rank)
			throw("lockRank release without matching lockRank acquire")
		}
	})
}

// lockWithRankMayAcquire checks that the caller could legally acquire l with
// the given rank, given the locks currently held, without actually acquiring
// it.
//
// See comment on lockWithRank regarding stack splitting.
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
	gp := getg()
	if gp.m.locksHeldLen == 0 {
		// No possibility of lock ordering problem if no other locks held
		return
	}

	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Temporarily add this lock to the locksHeld list, so
		// checkRanks() will print out the list, including this lock, if
		// there is a lock ordering problem.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++
		checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		gp.m.locksHeldLen--
	})
}
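
// Illustration (added commentary, not part of the upstream file): this is
// useful when a lock may be acquired on only some paths, so the ordering edge
// is validated up front regardless of which path runs. Sketch with a
// hypothetical conditionally-taken lock:
//
//	lockWithRankMayAcquire(&c.lock, lockRankHchan) // validate the edge now
//	if mustLock {
//		lock(&c.lock) // the acquire was already checked above
//	}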

// checkLockHeld reports whether gp's m currently holds l.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func checkLockHeld(gp *g, l *mutex) bool {
	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
			return true
		}
	}
	return false
}

// assertLockHeld throws if l is not held by the caller.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertLockHeld(l *mutex) {
	gp := getg()

	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}
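
// Illustration (added commentary, not part of the upstream file): functions
// with a locking precondition typically assert it at entry; the method and
// parameter names below are hypothetical:
//
//	// freeLocked requires that the caller holds h.lock.
//	func (h *mheap) freeLocked(s *mspan) {
//		assertLockHeld(&h.lock)
//		// ... safe to mutate state guarded by h.lock ...
//	}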

// assertRankHeld throws if a mutex with rank r is not held by the caller.
//
// This is less precise than assertLockHeld, but can be used in places where a
// pointer to the exact mutex is not available.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertRankHeld(r lockRank) {
	gp := getg()

	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].rank == r {
			return
		}
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock with rank ", r.String(), ", holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}

// worldStopped notes that the world is stopped.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func worldStopped() {
	if stopped := worldIsStopped.Add(1); stopped != 1 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("recursive world stop")
		})
	}
}

// worldStarted notes that the world is starting.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func worldStarted() {
	if stopped := worldIsStopped.Add(-1); stopped != 0 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("released non-stopped world stop")
		})
	}
}
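
// Illustration (added commentary, not part of the upstream file): the
// stop-the-world path brackets the stopped region with this counter;
// stopTheWorldWithSema ends by calling worldStopped, and
// startTheWorldWithSema begins by calling worldStarted. A sketch of the
// protocol:
//
//	semacquire(&worldsema)
//	systemstack(stopTheWorldWithSema) // worldStopped() runs; count becomes 1
//	// ... assertWorldStopped() now succeeds ...
//	systemstack(func() { startTheWorldWithSema(...) }) // worldStarted() runs
//	semrelease(&worldsema)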

// checkWorldStopped reports whether the world is stopped. It also throws if
// the stop count is inconsistent (greater than one).
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func checkWorldStopped() bool {
	stopped := worldIsStopped.Load()
	if stopped > 1 {
		systemstack(func() {
			print("inconsistent world stop count=", stopped, "\n")
			throw("inconsistent world stop count")
		})
	}

	return stopped == 1
}

// assertWorldStopped throws if the world is not stopped. It does not check
// which M stopped the world.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertWorldStopped() {
	if checkWorldStopped() {
		return
	}

	throw("world not stopped")
}

// assertWorldStoppedOrLockHeld throws if the world is not stopped and the
// passed lock is not held.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex) {
	if checkWorldStopped() {
		return
	}

	gp := getg()
	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		println("<no world stop>")
		printHeldLocks(gp)
		throw("no world stop or required lock!")
	})
}