github.com/gidoBOSSftw5731/go/src@v0.0.0-20210226122457-d24b0edbf019/runtime/lockrank_on.go (about)

     1  // Copyright 2020 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.staticlockranking
     6  // +build goexperiment.staticlockranking
     7  
     8  package runtime
     9  
    10  import (
    11  	"runtime/internal/atomic"
    12  	"unsafe"
    13  )
    14  
// worldIsStopped is accessed atomically to track world-stops. 1 == world
// stopped.
// It is incremented by worldStopped and decremented by worldStarted; a
// value greater than 1 is a bug (see checkWorldStopped).
var worldIsStopped uint32
    18  
// lockRankStruct is embedded in mutex
type lockRankStruct struct {
	// static lock ranking of the lock, assigned by lockInit (or defaulted
	// to lockRankLeafRank at acquisition time; see lockWithRank).
	rank lockRank
	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
	// 32-bit systems.
	pad int
}
    27  
    28  // init checks that the partial order in lockPartialOrder fits within the total
    29  // order determined by the order of the lockRank constants.
    30  func init() {
    31  	for rank, list := range lockPartialOrder {
    32  		for _, entry := range list {
    33  			if entry > lockRank(rank) {
    34  				println("lockPartial order row", lockRank(rank).String(), "entry", entry.String())
    35  				throw("lockPartialOrder table is inconsistent with total lock ranking order")
    36  			}
    37  		}
    38  	}
    39  }
    40  
// lockInit records the static rank of l, used by the lock-order checker.
// Must be called before the mutex is used if a non-default rank is wanted.
func lockInit(l *mutex, rank lockRank) {
	l.rank = rank
}
    44  
// getLockRank returns the rank recorded for l by lockInit (the zero rank
// if lockInit was never called on l).
func getLockRank(l *mutex) lockRank {
	return l.rank
}
    48  
// lockWithRank is like lock(l), but allows the caller to specify a lock rank
// when acquiring a non-static lock.
//
// Note that we need to be careful about stack splits:
//
// This function is not nosplit, thus it may split at function entry. This may
// introduce a new edge in the lock order, but it is no different from any
// other (nosplit) call before this call (including the call to lock() itself).
//
// However, we switch to the systemstack to record the lock held to ensure that
// we record an accurate lock ordering. e.g., without systemstack, a stack
// split on entry to lock2() would record stack split locks as taken after l,
// even though l is not actually locked yet.
func lockWithRank(l *mutex, rank lockRank) {
	if l == &debuglock || l == &paniclk {
		// debuglock is only used for println/printlock(). Don't do lock
		// rank recording for it, since print/println are used when
		// printing out a lock ordering problem below.
		//
		// paniclk has an ordering problem, since it can be acquired
		// during a panic with any other locks held (especially if the
		// panic is because of a directed segv), and yet also allg is
		// acquired after paniclk in tracebackothers()). This is a genuine
		// problem, so for now we don't do lock rank recording for paniclk
		// either.
		lock2(l)
		return
	}
	if rank == 0 {
		// Unranked locks default to leaf rank: they may follow anything
		// but nothing (except other structures checked specially) may be
		// acquired after them. See checkRanks.
		rank = lockRankLeafRank
	}
	gp := getg()
	// Log the new class.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Record the lock before checking/acquiring, so that if checkRanks
		// throws, printHeldLocks includes this lock in its output.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
		lock2(l)
	})
}
    98  
    99  // nosplit to ensure it can be called in as many contexts as possible.
   100  //go:nosplit
   101  func printHeldLocks(gp *g) {
   102  	if gp.m.locksHeldLen == 0 {
   103  		println("<none>")
   104  		return
   105  	}
   106  
   107  	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
   108  		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(gp.m.locksHeld[j].lockAddr))
   109  	}
   110  }
   111  
// acquireLockRank acquires a rank which is not associated with a mutex lock
// (lockAddr is recorded as 0 to distinguish it from real mutexes).
//
// This function may be called in nosplit context and thus must be nosplit.
//go:nosplit
func acquireLockRank(rank lockRank) {
	gp := getg()
	// Log the new class. See comment on lockWithRank.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Record before checking, so a checkRanks failure prints this
		// rank along with those already held.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = 0
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
	})
}
   134  
   135  // checkRanks checks if goroutine g, which has mostly recently acquired a lock
   136  // with rank 'prevRank', can now acquire a lock with rank 'rank'.
   137  //
   138  //go:systemstack
   139  func checkRanks(gp *g, prevRank, rank lockRank) {
   140  	rankOK := false
   141  	if rank < prevRank {
   142  		// If rank < prevRank, then we definitely have a rank error
   143  		rankOK = false
   144  	} else if rank == lockRankLeafRank {
   145  		// If new lock is a leaf lock, then the preceding lock can
   146  		// be anything except another leaf lock.
   147  		rankOK = prevRank < lockRankLeafRank
   148  	} else {
   149  		// We've now verified the total lock ranking, but we
   150  		// also enforce the partial ordering specified by
   151  		// lockPartialOrder as well. Two locks with the same rank
   152  		// can only be acquired at the same time if explicitly
   153  		// listed in the lockPartialOrder table.
   154  		list := lockPartialOrder[rank]
   155  		for _, entry := range list {
   156  			if entry == prevRank {
   157  				rankOK = true
   158  				break
   159  			}
   160  		}
   161  	}
   162  	if !rankOK {
   163  		printlock()
   164  		println(gp.m.procid, " ======")
   165  		printHeldLocks(gp)
   166  		throw("lock ordering problem")
   167  	}
   168  }
   169  
// unlockWithRank is like unlock(l), additionally removing l from the list of
// locks recorded as held by this M.
//
// See comment on lockWithRank regarding stack splitting.
func unlockWithRank(l *mutex) {
	if l == &debuglock || l == &paniclk {
		// See comment at beginning of lockWithRank.
		unlock2(l)
		return
	}
	gp := getg()
	systemstack(func() {
		found := false
		// Search from the most recently acquired lock: locks are normally
		// released in LIFO order, so this usually hits immediately.
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
				found = true
				// Compact the list by shifting later entries down one slot.
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", l.rank.String(), l.rank, l)
			throw("unlock without matching lock acquire")
		}
		unlock2(l)
	})
}
   195  
// releaseLockRank releases a rank which is not associated with a mutex lock
// (only entries with lockAddr == 0, i.e. those added by acquireLockRank,
// are candidates for removal).
//
// This function may be called in nosplit context and thus must be nosplit.
//go:nosplit
func releaseLockRank(rank lockRank) {
	gp := getg()
	systemstack(func() {
		found := false
		// Search from the most recently acquired entry (LIFO is the
		// common release order).
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
				found = true
				// Compact the list by shifting later entries down one slot.
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", rank.String(), rank)
			throw("lockRank release without matching lockRank acquire")
		}
	})
}
   218  
// lockWithRankMayAcquire checks that it would be legal to acquire l with the
// given rank at this point, without actually acquiring it. It throws on an
// ordering violation, exactly as a real acquisition would.
//
// See comment on lockWithRank regarding stack splitting.
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
	gp := getg()
	if gp.m.locksHeldLen == 0 {
		// No possibility of lock ordering problem if no other locks held
		return
	}

	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Temporarily add this lock to the locksHeld list, so
		// checkRanks() will print out list, including this lock, if there
		// is a lock ordering problem.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++
		checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		// Pop the temporary entry; the lock was never actually taken.
		gp.m.locksHeldLen--
	})
}
   242  
   243  // nosplit to ensure it can be called in as many contexts as possible.
   244  //go:nosplit
   245  func checkLockHeld(gp *g, l *mutex) bool {
   246  	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
   247  		if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
   248  			return true
   249  		}
   250  	}
   251  	return false
   252  }
   253  
   254  // assertLockHeld throws if l is not held by the caller.
   255  //
   256  // nosplit to ensure it can be called in as many contexts as possible.
   257  //go:nosplit
   258  func assertLockHeld(l *mutex) {
   259  	gp := getg()
   260  
   261  	held := checkLockHeld(gp, l)
   262  	if held {
   263  		return
   264  	}
   265  
   266  	// Crash from system stack to avoid splits that may cause
   267  	// additional issues.
   268  	systemstack(func() {
   269  		printlock()
   270  		print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n")
   271  		printHeldLocks(gp)
   272  		throw("not holding required lock!")
   273  	})
   274  }
   275  
   276  // assertRankHeld throws if a mutex with rank r is not held by the caller.
   277  //
   278  // This is less precise than assertLockHeld, but can be used in places where a
   279  // pointer to the exact mutex is not available.
   280  //
   281  // nosplit to ensure it can be called in as many contexts as possible.
   282  //go:nosplit
   283  func assertRankHeld(r lockRank) {
   284  	gp := getg()
   285  
   286  	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
   287  		if gp.m.locksHeld[i].rank == r {
   288  			return
   289  		}
   290  	}
   291  
   292  	// Crash from system stack to avoid splits that may cause
   293  	// additional issues.
   294  	systemstack(func() {
   295  		printlock()
   296  		print("caller requires lock with rank ", r.String(), "), holding:\n")
   297  		printHeldLocks(gp)
   298  		throw("not holding required lock!")
   299  	})
   300  }
   301  
   302  // worldStopped notes that the world is stopped.
   303  //
   304  // Caller must hold worldsema.
   305  //
   306  // nosplit to ensure it can be called in as many contexts as possible.
   307  //go:nosplit
   308  func worldStopped() {
   309  	if stopped := atomic.Xadd(&worldIsStopped, 1); stopped != 1 {
   310  		systemstack(func() {
   311  			print("world stop count=", stopped, "\n")
   312  			throw("recursive world stop")
   313  		})
   314  	}
   315  }
   316  
   317  // worldStarted that the world is starting.
   318  //
   319  // Caller must hold worldsema.
   320  //
   321  // nosplit to ensure it can be called in as many contexts as possible.
   322  //go:nosplit
   323  func worldStarted() {
   324  	if stopped := atomic.Xadd(&worldIsStopped, -1); stopped != 0 {
   325  		systemstack(func() {
   326  			print("world stop count=", stopped, "\n")
   327  			throw("released non-stopped world stop")
   328  		})
   329  	}
   330  }
   331  
   332  // nosplit to ensure it can be called in as many contexts as possible.
   333  //go:nosplit
   334  func checkWorldStopped() bool {
   335  	stopped := atomic.Load(&worldIsStopped)
   336  	if stopped > 1 {
   337  		systemstack(func() {
   338  			print("inconsistent world stop count=", stopped, "\n")
   339  			throw("inconsistent world stop count")
   340  		})
   341  	}
   342  
   343  	return stopped == 1
   344  }
   345  
   346  // assertWorldStopped throws if the world is not stopped. It does not check
   347  // which M stopped the world.
   348  //
   349  // nosplit to ensure it can be called in as many contexts as possible.
   350  //go:nosplit
   351  func assertWorldStopped() {
   352  	if checkWorldStopped() {
   353  		return
   354  	}
   355  
   356  	throw("world not stopped")
   357  }
   358  
   359  // assertWorldStoppedOrLockHeld throws if the world is not stopped and the
   360  // passed lock is not held.
   361  //
   362  // nosplit to ensure it can be called in as many contexts as possible.
   363  //go:nosplit
   364  func assertWorldStoppedOrLockHeld(l *mutex) {
   365  	if checkWorldStopped() {
   366  		return
   367  	}
   368  
   369  	gp := getg()
   370  	held := checkLockHeld(gp, l)
   371  	if held {
   372  		return
   373  	}
   374  
   375  	// Crash from system stack to avoid splits that may cause
   376  	// additional issues.
   377  	systemstack(func() {
   378  		printlock()
   379  		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
   380  		println("<no world stop>")
   381  		printHeldLocks(gp)
   382  		throw("no world stop or required lock!")
   383  	})
   384  }