github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/mklockrank.go

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// Ranks in all caps are pseudo-nodes that help define order, but do
// not actually define a rank.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  sweep;

# Scheduler, timers, netpoll
NONE < pollDesc, cpuprof;
assistQueue,
  cpuprof,
  forcegc,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  sweep,
  sweepWaiters
< sched;
sched < allg, allp;
allp < timers;
timers < netpollInit;

# Channels
scavenge, sweep < hchan;
NONE < notifyList;
hchan, notifyList < sudog;

# RWMutex
NONE < rwmutexW;
rwmutexW, sysmon < rwmutexR;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  hchan,
  notifyList,
  reflectOffs,
  timers,
  traceStrings,
  userArenaState
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  gcBitsArenas,
  mheapSpecial,
  mspanSpecial,
  spanSetSpine,
  MPROF;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan, rwmutexR < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  sudog
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
mheap, mheapSpecial < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event.
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
`
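
// For example, a fragment like
//
//	NONE < a;
//	a < b, c;
//
// (hypothetical names, not part of the graph above) says that a may be
// acquired with no locks held, and that b or c may only be acquired
// while a is held. Because b and c are unordered with respect to each
// other, acquiring one while holding the other is a rank violation.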

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// select.
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}

func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		fmt.Fprintln(os.Stderr, "too many arguments")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down
		// and time moves forward over the edges instead of
		// backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}
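
// A typical invocation (illustrative; the file carries a go:build
// ignore constraint, so it is run as a standalone program):
//
//	go run mklockrank.go -o lockrank.go
//	go run mklockrank.go -dot | dot -Tsvg -o lockrank.svg
//
// The second form assumes the Graphviz "dot" tool is available.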

func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks. Reverse the topological order so that
	// the constants are emitted from the lowest rank to the highest.
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of a lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create string table.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create partial order structure.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}

// cname returns the Go const name for the given lock rank label.
func cname(label string) string {
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

// isPseudo reports whether label names a pseudo-node: an all-caps
// name that helps define order but is not a real lock rank.
func isPseudo(label string) bool {
	return strings.ToUpper(label) == label
}

// generateDot emits a Graphviz dot representation of g to w.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")

	// Define all nodes.
	for _, node := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", node)
	}

	// Create edges.
	for _, node := range g.Nodes {
		for _, to := range g.Edges(node) {
			fmt.Fprintf(w, "%q -> %q;\n", node, to)
		}
	}

	fmt.Fprintf(w, "}\n")
}
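
// To sketch the output shape: for a hypothetical two-lock graph
// "NONE < a; a < b;" (not the real graph above), generateGo would emit
// roughly
//
//	const (
//		lockRankUnknown lockRank = iota
//		lockRankA
//		lockRankB
//	)
//
//	var lockNames = []string{
//		lockRankA: "a",
//		lockRankB: "b",
//	}
//
//	var lockPartialOrder [][]lockRank = [][]lockRank{
//		lockRankA: {},
//		lockRankB: {lockRankA},
//	}
//
// so rank b may only be acquired while holding locks whose ranks appear
// in its lockPartialOrder entry (here, just lockRankA).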