runtime/mklockrank.go

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
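//
// Usage:
//
//	go run mklockrank.go [-o output] [-dot]
//
// By default, the generated Go source is written to standard output.
// The -o flag writes it to a file instead, and the -dot flag emits a
// Graphviz dot rendering of the rank graph rather than Go code.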
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
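//
// For example, "scavenge, sweep < hchan" below means that neither the
// scavenge nor the sweep lock may be acquired while an hchan lock is
// held.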
//
// Ranks in all caps are pseudo-nodes that help define order, but do
// not actually define a rank.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  sweep;

# Scheduler, timers, netpoll
NONE < pollDesc, cpuprof;
assistQueue,
  cpuprof,
  forcegc,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  sweep,
  sweepWaiters
< sched;
sched < allg, allp;
allp < timers;
timers < netpollInit;

# Channels
scavenge, sweep < hchan;
NONE < notifyList;
hchan, notifyList < sudog;

# RWMutex
NONE < rwmutexW;
rwmutexW, sysmon < rwmutexR;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  hchan,
  notifyList,
  reflectOffs,
  timers,
  traceStrings,
  userArenaState
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  gcBitsArenas,
  mheapSpecial,
  mspanSpecial,
  spanSetSpine,
  MPROF;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan, rwmutexR < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  sudog
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
mheap, mheapSpecial < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;
`
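
// For illustration only: a tiny graph such as "NONE < a; a < b" would
// produce generated code along these lines (a sketch, not the actual
// output for the graph above):
//
//	const (
//		lockRankUnknown lockRank = iota
//		lockRankA
//		lockRankB
//	)
//
//	var lockPartialOrder [][]lockRank = [][]lockRank{
//		lockRankA: {},
//		lockRankB: {lockRankA},
//	}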

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// select.
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}

func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		fmt.Fprintln(os.Stderr, "too many arguments")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down
		// and time moves forward over the edges instead of
		// backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}

func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks.
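	// g.Topo returns the ranks from highest to lowest (each edge
	// points from a rank to the ranks that may already be held below
	// it), so reverse the slice to emit constants in increasing rank
	// order.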
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of a lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create string table.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create partial order structure.
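	// Emit one row per non-pseudo rank, listing every rank that may
	// already be held when that rank is acquired; cyclic ranks also
	// list themselves.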
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}

// cname returns the Go const name for the given lock rank label.
func cname(label string) string {
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

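// isPseudo reports whether label is a pseudo-node: an all-caps name
// that helps define the order but does not itself define a rank.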
func isPseudo(label string) bool {
	return strings.ToUpper(label) == label
}

// generateDot emits a Graphviz dot representation of g to w.
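// The output can be rendered with Graphviz, for example:
//
//	go run mklockrank.go -dot | dot -Tsvg -o lockrank.svg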
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")

	// Define all nodes.
	for _, node := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", node)
	}

	// Create edges.
	for _, node := range g.Nodes {
		for _, to := range g.Edges(node) {
			fmt.Fprintf(w, "%q -> %q;\n", node, to)
		}
	}

	fmt.Fprintf(w, "}\n")
}