github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/lockrank_on.go (about)

     1  // Copyright 2020 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build goexperiment.staticlockranking
     6  
     7  package runtime
     8  
     9  import (
    10  	"runtime/internal/atomic"
    11  	"unsafe"
    12  )
    13  
// worldIsStopped is accessed atomically to track world-stops.
// 1 == world stopped; 0 == world running. A value > 1 indicates a
// recursive (and therefore buggy) world stop and is reported by
// checkWorldStopped / worldStopped below.
var worldIsStopped uint32
    17  
    18  // lockRankStruct is embedded in mutex
    19  type lockRankStruct struct {
    20  	// static lock ranking of the lock
    21  	rank lockRank
    22  	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
    23  	// 32-bit systems.
    24  	pad int
    25  }
    26  
    27  // init checks that the partial order in lockPartialOrder fits within the total
    28  // order determined by the order of the lockRank constants.
    29  func init() {
    30  	for rank, list := range lockPartialOrder {
    31  		for _, entry := range list {
    32  			if entry > lockRank(rank) {
    33  				println("lockPartial order row", lockRank(rank).String(), "entry", entry.String())
    34  				throw("lockPartialOrder table is inconsistent with total lock ranking order")
    35  			}
    36  		}
    37  	}
    38  }
    39  
    40  func lockInit(l *mutex, rank lockRank) {
    41  	l.rank = rank
    42  }
    43  
    44  func getLockRank(l *mutex) lockRank {
    45  	return l.rank
    46  }
    47  
// lockWithRank is like lock(l), but allows the caller to specify a lock rank
// when acquiring a non-static lock.
//
// Note that we need to be careful about stack splits:
//
// This function is not nosplit, thus it may split at function entry. This may
// introduce a new edge in the lock order, but it is no different from any
// other (nosplit) call before this call (including the call to lock() itself).
//
// However, we switch to the systemstack to record the lock held to ensure that
// we record an accurate lock ordering. e.g., without systemstack, a stack
// split on entry to lock2() would record stack split locks as taken after l,
// even though l is not actually locked yet.
func lockWithRank(l *mutex, rank lockRank) {
	if l == &debuglock || l == &paniclk {
		// debuglock is only used for println/printlock(). Don't do lock
		// rank recording for it, since print/println are used when
		// printing out a lock ordering problem below.
		//
		// paniclk has an ordering problem, since it can be acquired
		// during a panic with any other locks held (especially if the
		// panic is because of a directed segv), and yet also allg is
		// acquired after paniclk in tracebackothers()). This is a genuine
		// problem, so for now we don't do lock rank recording for paniclk
		// either.
		lock2(l)
		return
	}
	if rank == 0 {
		// Rank 0 means the caller did not specify a rank; treat the
		// lock as a leaf (anything may precede it, but no other leaf
		// may follow it — see checkRanks).
		rank = lockRankLeafRank
	}
	gp := getg()
	// Log the new class.
	systemstack(func() {
		// Record l as held *before* acquiring it, so that any lock
		// ordering problem is reported with l included in the list.
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			// Check against the most recently acquired lock only;
			// earlier pairs were checked when they were acquired.
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
		lock2(l)
	})
}
    97  
    98  // nosplit to ensure it can be called in as many contexts as possible.
    99  //go:nosplit
   100  func printHeldLocks(gp *g) {
   101  	if gp.m.locksHeldLen == 0 {
   102  		println("<none>")
   103  		return
   104  	}
   105  
   106  	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
   107  		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(gp.m.locksHeld[j].lockAddr))
   108  	}
   109  }
   110  
// acquireLockRank acquires a rank which is not associated with a mutex lock
//
// This function may be called in nosplit context and thus must be nosplit.
//go:nosplit
func acquireLockRank(rank lockRank) {
	gp := getg()
	// Log the new class. See comment on lockWithRank.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		// lockAddr 0 marks this as a rank-only entry (no mutex);
		// releaseLockRank matches entries by rank and lockAddr == 0.
		gp.m.locksHeld[i].lockAddr = 0
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
	})
}
   133  
   134  // checkRanks checks if goroutine g, which has mostly recently acquired a lock
   135  // with rank 'prevRank', can now acquire a lock with rank 'rank'.
   136  //
   137  //go:systemstack
   138  func checkRanks(gp *g, prevRank, rank lockRank) {
   139  	rankOK := false
   140  	if rank < prevRank {
   141  		// If rank < prevRank, then we definitely have a rank error
   142  		rankOK = false
   143  	} else if rank == lockRankLeafRank {
   144  		// If new lock is a leaf lock, then the preceding lock can
   145  		// be anything except another leaf lock.
   146  		rankOK = prevRank < lockRankLeafRank
   147  	} else {
   148  		// We've now verified the total lock ranking, but we
   149  		// also enforce the partial ordering specified by
   150  		// lockPartialOrder as well. Two locks with the same rank
   151  		// can only be acquired at the same time if explicitly
   152  		// listed in the lockPartialOrder table.
   153  		list := lockPartialOrder[rank]
   154  		for _, entry := range list {
   155  			if entry == prevRank {
   156  				rankOK = true
   157  				break
   158  			}
   159  		}
   160  	}
   161  	if !rankOK {
   162  		printlock()
   163  		println(gp.m.procid, " ======")
   164  		printHeldLocks(gp)
   165  		throw("lock ordering problem")
   166  	}
   167  }
   168  
// unlockWithRank is like unlock(l), but also removes l from the held-lock
// list used for rank checking.
//
// See comment on lockWithRank regarding stack splitting.
func unlockWithRank(l *mutex) {
	if l == &debuglock || l == &paniclk {
		// See comment at beginning of lockWithRank.
		unlock2(l)
		return
	}
	gp := getg()
	systemstack(func() {
		found := false
		// Search newest-to-oldest: locks are normally released in
		// LIFO order, so the match is usually at the end.
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
				found = true
				// Compact the list by shifting later entries down
				// over the released slot.
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", l.rank.String(), l.rank, l)
			throw("unlock without matching lock acquire")
		}
		unlock2(l)
	})
}
   194  
// releaseLockRank releases a rank which is not associated with a mutex lock
// (i.e. one previously recorded by acquireLockRank).
//
// This function may be called in nosplit context and thus must be nosplit.
//go:nosplit
func releaseLockRank(rank lockRank) {
	gp := getg()
	systemstack(func() {
		found := false
		// Match only rank-only entries (lockAddr == 0), scanning
		// newest-to-oldest to favor LIFO release order.
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
				found = true
				// Compact the list over the released slot.
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", rank.String(), rank)
			throw("lockRank release without matching lockRank acquire")
		}
	})
}
   217  
// lockWithRankMayAcquire checks that it would be valid to acquire l (with the
// given rank) right now, without actually acquiring it.
//
// See comment on lockWithRank regarding stack splitting.
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
	gp := getg()
	if gp.m.locksHeldLen == 0 {
		// No possibilty of lock ordering problem if no other locks held
		return
	}

	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Temporarily add this lock to the locksHeld list, so
		// checkRanks() will print out list, including this lock, if there
		// is a lock ordering problem.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++
		// i >= 1 here: the early return above guarantees at least one
		// lock was already held, so locksHeld[i-1] is valid.
		checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		// Pop the temporary entry; l was never actually acquired.
		gp.m.locksHeldLen--
	})
}
   241  
   242  // nosplit to ensure it can be called in as many contexts as possible.
   243  //go:nosplit
   244  func checkLockHeld(gp *g, l *mutex) bool {
   245  	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
   246  		if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
   247  			return true
   248  		}
   249  	}
   250  	return false
   251  }
   252  
   253  // assertLockHeld throws if l is not held by the caller.
   254  //
   255  // nosplit to ensure it can be called in as many contexts as possible.
   256  //go:nosplit
   257  func assertLockHeld(l *mutex) {
   258  	gp := getg()
   259  
   260  	held := checkLockHeld(gp, l)
   261  	if held {
   262  		return
   263  	}
   264  
   265  	// Crash from system stack to avoid splits that may cause
   266  	// additional issues.
   267  	systemstack(func() {
   268  		printlock()
   269  		print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n")
   270  		printHeldLocks(gp)
   271  		throw("not holding required lock!")
   272  	})
   273  }
   274  
   275  // assertRankHeld throws if a mutex with rank r is not held by the caller.
   276  //
   277  // This is less precise than assertLockHeld, but can be used in places where a
   278  // pointer to the exact mutex is not available.
   279  //
   280  // nosplit to ensure it can be called in as many contexts as possible.
   281  //go:nosplit
   282  func assertRankHeld(r lockRank) {
   283  	gp := getg()
   284  
   285  	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
   286  		if gp.m.locksHeld[i].rank == r {
   287  			return
   288  		}
   289  	}
   290  
   291  	// Crash from system stack to avoid splits that may cause
   292  	// additional issues.
   293  	systemstack(func() {
   294  		printlock()
   295  		print("caller requires lock with rank ", r.String(), "), holding:\n")
   296  		printHeldLocks(gp)
   297  		throw("not holding required lock!")
   298  	})
   299  }
   300  
   301  // worldStopped notes that the world is stopped.
   302  //
   303  // Caller must hold worldsema.
   304  //
   305  // nosplit to ensure it can be called in as many contexts as possible.
   306  //go:nosplit
   307  func worldStopped() {
   308  	if stopped := atomic.Xadd(&worldIsStopped, 1); stopped != 1 {
   309  		systemstack(func() {
   310  			print("world stop count=", stopped, "\n")
   311  			throw("recursive world stop")
   312  		})
   313  	}
   314  }
   315  
// worldStarted notes that the world is starting.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func worldStarted() {
	// Xadd returns the new value; it must be 0 (no outstanding stops).
	if stopped := atomic.Xadd(&worldIsStopped, -1); stopped != 0 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("released non-stopped world stop")
		})
	}
}
   330  
   331  // nosplit to ensure it can be called in as many contexts as possible.
   332  //go:nosplit
   333  func checkWorldStopped() bool {
   334  	stopped := atomic.Load(&worldIsStopped)
   335  	if stopped > 1 {
   336  		systemstack(func() {
   337  			print("inconsistent world stop count=", stopped, "\n")
   338  			throw("inconsistent world stop count")
   339  		})
   340  	}
   341  
   342  	return stopped == 1
   343  }
   344  
   345  // assertWorldStopped throws if the world is not stopped. It does not check
   346  // which M stopped the world.
   347  //
   348  // nosplit to ensure it can be called in as many contexts as possible.
   349  //go:nosplit
   350  func assertWorldStopped() {
   351  	if checkWorldStopped() {
   352  		return
   353  	}
   354  
   355  	throw("world not stopped")
   356  }
   357  
   358  // assertWorldStoppedOrLockHeld throws if the world is not stopped and the
   359  // passed lock is not held.
   360  //
   361  // nosplit to ensure it can be called in as many contexts as possible.
   362  //go:nosplit
   363  func assertWorldStoppedOrLockHeld(l *mutex) {
   364  	if checkWorldStopped() {
   365  		return
   366  	}
   367  
   368  	gp := getg()
   369  	held := checkLockHeld(gp, l)
   370  	if held {
   371  		return
   372  	}
   373  
   374  	// Crash from system stack to avoid splits that may cause
   375  	// additional issues.
   376  	systemstack(func() {
   377  		printlock()
   378  		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
   379  		println("<no world stop>")
   380  		printHeldLocks(gp)
   381  		throw("no world stop or required lock!")
   382  	})
   383  }