github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/runtime/trace2runtime.go

     1  // Copyright 2023 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.exectracer2
     6  
     7  // Runtime -> tracer API.
     8  
     9  package runtime
    10  
    11  import (
    12  	"runtime/internal/atomic"
    13  	_ "unsafe" // for go:linkname
    14  )
    15  
    16  // gTraceState is per-G state for the tracer.
    17  type gTraceState struct {
    18  	traceSchedResourceState
    19  }
    20  
    21  // reset resets the gTraceState for a new goroutine.
    22  func (s *gTraceState) reset() {
    23  	s.seq = [2]uint64{}
    24  	// N.B. s.statusTraced is managed and cleared separately.
    25  }
    26  
    27  // mTraceState is per-M state for the tracer.
    28  type mTraceState struct {
    29  	seqlock atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer.
    30  	buf     [2]*traceBuf   // Per-M traceBuf for writing. Indexed by trace.gen%2.
    31  	link    *m             // Snapshot of alllink or freelink.
    32  }
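
// The seqlock above follows the usual even/odd convention: an odd value means this
// M is currently inside a trace-write critical section, an even value means it is
// not. A minimal sketch of the protocol, as implemented by traceAcquire and
// traceRelease below (illustrative only, not additional runtime code):
//
//	seq := mp.trace.seqlock.Add(1) // odd: this M is now writing trace events
//	// ... write events into mp.trace.buf[gen%2] ...
//	mp.trace.seqlock.Add(1)        // even again: done writing
//
// traceAdvance relies on observing every M outside such a critical section before
// it can move the trace generation forward.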
    33  
    34  // pTraceState is per-P state for the tracer.
    35  type pTraceState struct {
    36  	traceSchedResourceState
    37  
    38  	// mSyscallID is the ID of the M this was bound to before entering a syscall.
    39  	mSyscallID int64
    40  
    41  	// maySweep indicates the sweep events should be traced.
    42  	// This is used to defer the sweep start event until a span
    43  	// has actually been swept.
    44  	maySweep bool
    45  
    46  	// inSweep indicates that at least one sweep event has been traced.
    47  	inSweep bool
    48  
    49  	// swept and reclaimed track the number of bytes swept and reclaimed
    50  	// by sweeping in the current sweep loop (while maySweep was true).
    51  	swept, reclaimed uintptr
    52  }
    53  
    54  // traceLockInit initializes global trace locks.
    55  func traceLockInit() {
    56  	// Sharing a lock rank here is fine because they should never be accessed
    57  	// together. If they are, we want to find out immediately.
    58  	lockInit(&trace.stringTab[0].lock, lockRankTraceStrings)
    59  	lockInit(&trace.stringTab[0].tab.lock, lockRankTraceStrings)
    60  	lockInit(&trace.stringTab[1].lock, lockRankTraceStrings)
    61  	lockInit(&trace.stringTab[1].tab.lock, lockRankTraceStrings)
    62  	lockInit(&trace.stackTab[0].tab.lock, lockRankTraceStackTab)
    63  	lockInit(&trace.stackTab[1].tab.lock, lockRankTraceStackTab)
    64  	lockInit(&trace.lock, lockRankTrace)
    65  }
    66  
    67  // lockRankMayTraceFlush records the lock ranking effects of a
    68  // potential call to traceFlush.
    69  //
    70  // nosplit because traceAcquire is nosplit.
    71  //
    72  //go:nosplit
    73  func lockRankMayTraceFlush() {
    74  	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
    75  }
    76  
    77  // traceBlockReason is an enumeration of reasons a goroutine might block.
    78  // This is the interface the rest of the runtime uses to tell the
    79  // tracer why a goroutine blocked. The tracer then propagates this information
    80  // into the trace however it sees fit.
    81  //
    82  // Note that traceBlockReasons should not be compared, since reasons that are
    83  // distinct by name may *not* be distinct by value.
    84  type traceBlockReason uint8
    85  
    86  const (
    87  	traceBlockGeneric traceBlockReason = iota
    88  	traceBlockForever
    89  	traceBlockNet
    90  	traceBlockSelect
    91  	traceBlockCondWait
    92  	traceBlockSync
    93  	traceBlockChanSend
    94  	traceBlockChanRecv
    95  	traceBlockGCMarkAssist
    96  	traceBlockGCSweep
    97  	traceBlockSystemGoroutine
    98  	traceBlockPreempted
    99  	traceBlockDebugCall
   100  	traceBlockUntilGCEnds
   101  	traceBlockSleep
   102  )
   103  
   104  var traceBlockReasonStrings = [...]string{
   105  	traceBlockGeneric:         "unspecified",
   106  	traceBlockForever:         "forever",
   107  	traceBlockNet:             "network",
   108  	traceBlockSelect:          "select",
   109  	traceBlockCondWait:        "sync.(*Cond).Wait",
   110  	traceBlockSync:            "sync",
   111  	traceBlockChanSend:        "chan send",
   112  	traceBlockChanRecv:        "chan receive",
   113  	traceBlockGCMarkAssist:    "GC mark assist wait for work",
   114  	traceBlockGCSweep:         "GC background sweeper wait",
   115  	traceBlockSystemGoroutine: "system goroutine wait",
   116  	traceBlockPreempted:       "preempted",
   117  	traceBlockDebugCall:       "wait for debug call",
   118  	traceBlockUntilGCEnds:     "wait until GC ends",
   119  	traceBlockSleep:           "sleep",
   120  }
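
// These reasons are handed to the tracer by the runtime's blocking points and are
// mapped to string IDs via the per-generation trace.goBlockReasons table (see
// GoPark below). A one-line illustrative use, with an arbitrary skip value:
//
//	tl.GoPark(traceBlockChanRecv, 2) // emits GoBlock with reason "chan receive"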
   121  
   122  // traceGoStopReason is an enumeration of reasons a goroutine might yield.
   123  //
   124  // Note that traceGoStopReasons should not be compared, since reasons that are
   125  // distinct by name may *not* be distinct by value.
   126  type traceGoStopReason uint8
   127  
   128  const (
   129  	traceGoStopGeneric traceGoStopReason = iota
   130  	traceGoStopGoSched
   131  	traceGoStopPreempted
   132  )
   133  
   134  var traceGoStopReasonStrings = [...]string{
   135  	traceGoStopGeneric:   "unspecified",
   136  	traceGoStopGoSched:   "runtime.Gosched",
   137  	traceGoStopPreempted: "preempted",
   138  }
   139  
   140  // traceEnabled returns true if the trace is currently enabled.
   141  //
   142  //go:nosplit
   143  func traceEnabled() bool {
   144  	return trace.enabled
   145  }
   146  
   147  // traceShuttingDown returns true if the trace is currently shutting down.
   148  func traceShuttingDown() bool {
   149  	return trace.shutdown.Load()
   150  }
   151  
   152  // traceLocker represents an M writing trace events. While a traceLocker value
   153  // is valid, the tracer observes all operations on the G/M/P or trace events being
   154  // written as happening atomically.
   155  type traceLocker struct {
   156  	mp  *m
   157  	gen uintptr
   158  }
   159  
   160  // debugTraceReentrancy checks if the trace is reentrant.
   161  //
   162  // This is optional because throwing in a function makes it instantly
   163  // not inlineable, and we want traceAcquire to be inlineable for
   164  // low overhead when the trace is disabled.
   165  const debugTraceReentrancy = false
   166  
   167  // traceAcquire prepares this M for writing one or more trace events.
   168  //
   169  // nosplit because it's called on the syscall path when stack movement is forbidden.
   170  //
   171  //go:nosplit
   172  func traceAcquire() traceLocker {
   173  	if !traceEnabled() {
   174  		return traceLocker{}
   175  	}
   176  	return traceAcquireEnabled()
   177  }
   178  
   179  // traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
   180  // broken out to make traceAcquire inlineable to keep the overhead of the tracer
   181  // when it's disabled low.
   182  //
   183  // nosplit because it's called by traceAcquire, which is nosplit.
   184  //
   185  //go:nosplit
   186  func traceAcquireEnabled() traceLocker {
   187  	// Any time we acquire a traceLocker, we may flush a trace buffer. But
   188  	// buffer flushes are rare. Record the lock edge even if it doesn't happen
   189  	// this time.
   190  	lockRankMayTraceFlush()
   191  
   192  	// Prevent preemption.
   193  	mp := acquirem()
   194  
   195  	// Acquire the trace seqlock. This prevents traceAdvance from moving forward
   196  	// until all Ms are observed to be outside of their seqlock critical section.
   197  	//
   198  	// Note: The seqlock is mutated here and also in traceCPUSample. If you update
   199  	// usage of the seqlock here, make sure to also look at what traceCPUSample is
   200  	// doing.
   201  	seq := mp.trace.seqlock.Add(1)
   202  	if debugTraceReentrancy && seq%2 != 1 {
   203  		throw("bad use of trace.seqlock or tracer is reentrant")
   204  	}
   205  
   206  	// N.B. This load of gen appears redundant with the one in traceEnabled.
   207  	// However, it's very important that the gen we use for writing to the trace
   208  	// is acquired under a traceLocker so traceAdvance can make sure no stale
   209  	// gen values are being used.
   210  	//
   211  	// Because we're doing this load again, it also means that the trace
   212  	// might end up being disabled when we load it. In that case we need to undo
   213  	// what we did and bail.
   214  	gen := trace.gen.Load()
   215  	if gen == 0 {
   216  		mp.trace.seqlock.Add(1)
   217  		releasem(mp)
   218  		return traceLocker{}
   219  	}
   220  	return traceLocker{mp, gen}
   221  }
   222  
   223  // ok returns true if the traceLocker is valid (i.e. tracing is enabled).
   224  //
   225  // nosplit because it's called on the syscall path when stack movement is forbidden.
   226  //
   227  //go:nosplit
   228  func (tl traceLocker) ok() bool {
   229  	return tl.gen != 0
   230  }
   231  
   232  // traceRelease indicates that this M is done writing trace events.
   233  //
   234  // nosplit because it's called on the syscall path when stack movement is forbidden.
   235  //
   236  //go:nosplit
   237  func traceRelease(tl traceLocker) {
   238  	seq := tl.mp.trace.seqlock.Add(1)
   239  	if debugTraceReentrancy && seq%2 != 0 {
   240  		print("runtime: seq=", seq, "\n")
   241  		throw("bad use of trace.seqlock")
   242  	}
   243  	releasem(tl.mp)
   244  }
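
// A typical runtime caller brackets event emission with the pair above. A minimal
// sketch of the pattern (it mirrors what trace_userTaskCreate and the other
// linknamed helpers at the end of this file do; illustrative only):
//
//	tl := traceAcquire()
//	if !tl.ok() {
//		// Tracing is disabled, or was disabled concurrently; emit nothing.
//		return
//	}
//	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
//	traceRelease(tl)
//
// traceAcquire disables preemption via acquirem and traceRelease re-enables it via
// releasem, so the window between the two calls should stay short and non-blocking.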
   245  
   246  // traceExitingSyscall marks a goroutine as exiting the syscall slow path.
   247  //
   248  // Must be paired with a traceExitedSyscall call.
   249  func traceExitingSyscall() {
   250  	trace.exitingSyscall.Add(1)
   251  }
   252  
   253  // traceExitedSyscall marks a goroutine as having exited the syscall slow path.
   254  func traceExitedSyscall() {
   255  	trace.exitingSyscall.Add(-1)
   256  }
   257  
   258  // Gomaxprocs emits a ProcsChange event.
   259  func (tl traceLocker) Gomaxprocs(procs int32) {
   260  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvProcsChange, traceArg(procs), tl.stack(1))
   261  }
   262  
   263  // ProcStart traces a ProcStart event.
   264  //
   265  // Must be called with a valid P.
   266  func (tl traceLocker) ProcStart() {
   267  	pp := tl.mp.p.ptr()
   268  	// Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
   269  	// it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
   270  	// is during a syscall.
   271  	tl.eventWriter(traceGoSyscall, traceProcIdle).commit(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
   272  }
   273  
   274  // ProcStop traces a ProcStop event.
   275  func (tl traceLocker) ProcStop(pp *p) {
   276  	// The only time a goroutine is allowed to have its Proc moved around
   277  	// from under it is during a syscall.
   278  	tl.eventWriter(traceGoSyscall, traceProcRunning).commit(traceEvProcStop)
   279  }
   280  
   281  // GCActive traces a GCActive event.
   282  //
   283  // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
   284  // easily and only depends on where it's currently called.
   285  func (tl traceLocker) GCActive() {
   286  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCActive, traceArg(trace.seqGC))
   287  	// N.B. Only one GC can be running at a time, so this is naturally
   288  	// serialized by the caller.
   289  	trace.seqGC++
   290  }
   291  
   292  // GCStart traces a GCBegin event.
   293  //
   294  // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
   295  // easily and only depends on where it's currently called.
   296  func (tl traceLocker) GCStart() {
   297  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
   298  	// N.B. Only one GC can be running at a time, so this is naturally
   299  	// serialized by the caller.
   300  	trace.seqGC++
   301  }
   302  
   303  // GCDone traces a GCEnd event.
   304  //
   305  // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
   306  // easily and only depends on where it's currently called.
   307  func (tl traceLocker) GCDone() {
   308  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCEnd, traceArg(trace.seqGC))
   309  	// N.B. Only one GC can be running at a time, so this is naturally
   310  	// serialized by the caller.
   311  	trace.seqGC++
   312  }
   313  
   314  // STWStart traces a STWBegin event.
   315  func (tl traceLocker) STWStart(reason stwReason) {
   316  	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
   317  	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
   318  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
   319  }
   320  
   321  // STWDone traces a STWEnd event.
   322  func (tl traceLocker) STWDone() {
   323  	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
   324  	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
   325  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWEnd)
   326  }
   327  
   328  // GCSweepStart prepares to trace a sweep loop. This does not
   329  // emit any events until traceGCSweepSpan is called.
   330  //
   331  // GCSweepStart must be paired with traceGCSweepDone and there
   332  // must be no preemption points between these two calls.
   333  //
   334  // Must be called with a valid P.
   335  func (tl traceLocker) GCSweepStart() {
   336  	// Delay the actual GCSweepBegin event until the first span
   337  	// sweep. If we don't sweep anything, don't emit any events.
   338  	pp := tl.mp.p.ptr()
   339  	if pp.trace.maySweep {
   340  		throw("double traceGCSweepStart")
   341  	}
   342  	pp.trace.maySweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
   343  }
   344  
   345  // GCSweepSpan traces the sweep of a single span. If this is
   346  // the first span swept since traceGCSweepStart was called, this
   347  // will emit a GCSweepBegin event.
   348  //
   349  // This may be called outside a traceGCSweepStart/traceGCSweepDone
   350  // pair; however, it will not emit any trace events in this case.
   351  //
   352  // Must be called with a valid P.
   353  func (tl traceLocker) GCSweepSpan(bytesSwept uintptr) {
   354  	pp := tl.mp.p.ptr()
   355  	if pp.trace.maySweep {
   356  		if pp.trace.swept == 0 {
   357  			tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepBegin, tl.stack(1))
   358  			pp.trace.inSweep = true
   359  		}
   360  		pp.trace.swept += bytesSwept
   361  	}
   362  }
   363  
   364  // GCSweepDone finishes tracing a sweep loop. If any memory was
   365  // swept (i.e. traceGCSweepSpan emitted an event) then this will emit
   366  // a GCSweepEnd event.
   367  //
   368  // Must be called with a valid P.
   369  func (tl traceLocker) GCSweepDone() {
   370  	pp := tl.mp.p.ptr()
   371  	if !pp.trace.maySweep {
   372  		throw("missing traceGCSweepStart")
   373  	}
   374  	if pp.trace.inSweep {
   375  		tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
   376  		pp.trace.inSweep = false
   377  	}
   378  	pp.trace.maySweep = false
   379  }
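
// Taken together, the three sweep hooks above are meant to be used in this order.
// The caller is responsible for keeping the whole sequence free of preemption
// points, as GCSweepStart's documentation requires. An illustrative sketch (the
// real callers live in the GC's sweep code; bytesSwept is just a placeholder):
//
//	// Preemption must already be disabled across this whole sequence.
//	if tl := traceAcquire(); tl.ok() {
//		tl.GCSweepStart() // arms maySweep; no event is emitted yet
//		traceRelease(tl)
//	}
//	for ... { // once per span swept
//		if tl := traceAcquire(); tl.ok() {
//			tl.GCSweepSpan(bytesSwept) // first call emits GCSweepBegin
//			traceRelease(tl)
//		}
//	}
//	if tl := traceAcquire(); tl.ok() {
//		tl.GCSweepDone() // emits GCSweepEnd only if something was swept
//		traceRelease(tl)
//	}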
   380  
   381  // GCMarkAssistStart emits a MarkAssistBegin event.
   382  func (tl traceLocker) GCMarkAssistStart() {
   383  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistBegin, tl.stack(1))
   384  }
   385  
   386  // GCMarkAssistDone emits a MarkAssistEnd event.
   387  func (tl traceLocker) GCMarkAssistDone() {
   388  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistEnd)
   389  }
   390  
   391  // GoCreate emits a GoCreate event.
   392  func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool) {
   393  	newg.trace.setStatusTraced(tl.gen)
   394  	ev := traceEvGoCreate
   395  	if blocked {
   396  		ev = traceEvGoCreateBlocked
   397  	}
   398  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
   399  }
   400  
   401  // GoStart emits a GoStart event.
   402  //
   403  // Must be called with a valid P.
   404  func (tl traceLocker) GoStart() {
   405  	gp := getg().m.curg
   406  	pp := gp.m.p
   407  	w := tl.eventWriter(traceGoRunnable, traceProcRunning)
   408  	w = w.write(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
   409  	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
   410  		w = w.write(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
   411  	}
   412  	w.end()
   413  }
   414  
   415  // GoEnd emits a GoDestroy event.
   416  //
   417  // TODO(mknyszek): Rename this to GoDestroy.
   418  func (tl traceLocker) GoEnd() {
   419  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
   420  }
   421  
   422  // GoSched emits a GoStop event with a GoSched reason.
   423  func (tl traceLocker) GoSched() {
   424  	tl.GoStop(traceGoStopGoSched)
   425  }
   426  
   427  // GoPreempt emits a GoStop event with a GoPreempted reason.
   428  func (tl traceLocker) GoPreempt() {
   429  	tl.GoStop(traceGoStopPreempted)
   430  }
   431  
   432  // GoStop emits a GoStop event with the provided reason.
   433  func (tl traceLocker) GoStop(reason traceGoStopReason) {
   434  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
   435  }
   436  
   437  // GoPark emits a GoBlock event with the provided reason.
   438  //
   439  // TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
   440  // that we have both, and waitReason is way more descriptive.
   441  func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
   442  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
   443  }
   444  
   445  // GoUnpark emits a GoUnblock event.
   446  func (tl traceLocker) GoUnpark(gp *g, skip int) {
   447  	// Emit a GoWaiting status if necessary for the unblocked goroutine.
   448  	w := tl.eventWriter(traceGoRunning, traceProcRunning)
   449  	// Careful: don't use the event writer. We never want status or in-progress events
   450  	// to trigger more in-progress events.
   451  	w.w = emitUnblockStatus(w.w, gp, tl.gen)
   452  	w.commit(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
   453  }
   454  
   455  // GoSwitch emits a GoSwitch event. If destroy is true, the calling goroutine
   456  // is simultaneously being destroyed.
   457  func (tl traceLocker) GoSwitch(nextg *g, destroy bool) {
   458  	// Emit a GoWaiting status if necessary for the unblocked goroutine.
   459  	w := tl.eventWriter(traceGoRunning, traceProcRunning)
   460  	// Careful: don't use the event writer. We never want status or in-progress events
   461  	// to trigger more in-progress events.
   462  	w.w = emitUnblockStatus(w.w, nextg, tl.gen)
   463  	ev := traceEvGoSwitch
   464  	if destroy {
   465  		ev = traceEvGoSwitchDestroy
   466  	}
   467  	w.commit(ev, traceArg(nextg.goid), nextg.trace.nextSeq(tl.gen))
   468  }
   469  
   470  // emitUnblockStatus emits a GoStatus GoWaiting event for a goroutine about to be
   471  // unblocked to the trace writer.
   472  func emitUnblockStatus(w traceWriter, gp *g, gen uintptr) traceWriter {
   473  	if !gp.trace.statusWasTraced(gen) && gp.trace.acquireStatus(gen) {
   474  		w = w.writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist)
   475  	}
   476  	return w
   477  }
   478  
   479  // GoSysCall emits a GoSyscallBegin event.
   480  //
   481  // Must be called with a valid P.
   482  func (tl traceLocker) GoSysCall() {
   483  	var skip int
   484  	switch {
   485  	case tracefpunwindoff():
   486  		// Unwind by skipping 1 frame relative to gp.syscallsp which is captured 3 frames
   487  		// above this frame. For frame pointer unwinding we produce the same results by hard
   488  		// coding the number of frames in between our caller and the actual syscall, see cases below.
   489  		// TODO(felixge): Implement gp.syscallbp to avoid this workaround?
   490  		skip = 1
   491  	case GOOS == "solaris" || GOOS == "illumos":
   492  		// These platforms don't use a libc_read_trampoline.
   493  		skip = 3
   494  	default:
   495  		// Skip the extra trampoline frame used on most systems.
   496  		skip = 4
   497  	}
   498  	// Scribble down the M that the P is currently attached to.
   499  	pp := tl.mp.p.ptr()
   500  	pp.trace.mSyscallID = int64(tl.mp.procid)
   501  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(skip))
   502  }
   503  
   504  // GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
   505  // if lostP is true.
   506  //
   507  // lostP must be true in all cases that a goroutine loses its P during a syscall.
   508  // This means it's not sufficient to check if it has no P. In particular, it needs to be
   509  // true in the following cases:
   510  // - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
   511  // - The goroutine lost its P and was unable to reacquire it, and is now running without a P.
   512  // - The goroutine lost its P and acquired a different one, and is now running with that P.
   513  func (tl traceLocker) GoSysExit(lostP bool) {
   514  	ev := traceEvGoSyscallEnd
   515  	procStatus := traceProcSyscall // Procs implicitly enter traceProcSyscall on GoSyscallBegin.
   516  	if lostP {
   517  		ev = traceEvGoSyscallEndBlocked
   518  		procStatus = traceProcRunning // If a G has a P when emitting this event, it reacquired a P and is indeed running.
   519  	} else {
   520  		tl.mp.p.ptr().trace.mSyscallID = -1
   521  	}
   522  	tl.eventWriter(traceGoSyscall, procStatus).commit(ev)
   523  }
   524  
   525  // ProcSteal indicates that our current M stole a P from another M.
   526  //
   527  // inSyscall indicates that we're stealing the P from a syscall context.
   528  //
   529  // The caller must have ownership of pp.
   530  func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) {
   531  	// Grab the M ID we stole from.
   532  	mStolenFrom := pp.trace.mSyscallID
   533  	pp.trace.mSyscallID = -1
   534  
   535  	// The status of the proc and goroutine, if we need to emit one here, is not evident from the
   536  	// context of just emitting this event alone. There are two cases. Either we're trying to steal
   537  	// the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
   538  	// ourselves specifically to keep running. The two contexts look different, but can be summarized
   539  	// fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either.
   540  	// In the latter, we're a goroutine in a syscall.
   541  	goStatus := traceGoRunning
   542  	procStatus := traceProcRunning
   543  	if inSyscall {
   544  		goStatus = traceGoSyscall
   545  		procStatus = traceProcSyscallAbandoned
   546  	}
   547  	w := tl.eventWriter(goStatus, procStatus)
   548  
   549  	// Emit the status of the P we're stealing. We may have *just* done this when creating the event
   550  	// writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a
   551  	// syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
   552  	// it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
   553  	// at all (e.g. entersyscall_gcwait).
   554  	if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
   555  		// Careful: don't use the event writer. We never want status or in-progress events
   556  		// to trigger more in-progress events.
   557  		w.w = w.w.writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep)
   558  	}
   559  	w.commit(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
   560  }
   561  
   562  // GoSysBlock is a no-op in the new tracer.
   563  func (tl traceLocker) GoSysBlock(pp *p) {
   564  }
   565  
   566  // HeapAlloc emits a HeapAlloc event.
   567  func (tl traceLocker) HeapAlloc(live uint64) {
   568  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
   569  }
   570  
   571  // HeapGoal reads the current heap goal and emits a HeapGoal event.
   572  func (tl traceLocker) HeapGoal() {
   573  	heapGoal := gcController.heapGoal()
   574  	if heapGoal == ^uint64(0) {
   575  		// Heap-based triggering is disabled.
   576  		heapGoal = 0
   577  	}
   578  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
   579  }
   580  
   581  // OneNewExtraM is a no-op in the new tracer. This is worth keeping around though because
   582  // it's a good place to insert a thread-level event about the new extra M.
   583  func (tl traceLocker) OneNewExtraM(_ *g) {
   584  }
   585  
   586  // GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
   587  //
   588  // Unlike GoCreate, the caller must be running on gp.
   589  //
   590  // This occurs when C code calls into Go. On pthread platforms it occurs only when
   591  // a C thread calls into Go code for the first time.
   592  func (tl traceLocker) GoCreateSyscall(gp *g) {
   593  	// N.B. We should never trace a status for this goroutine (which we're currently running on),
   594  	// since we want this to appear like goroutine creation.
   595  	gp.trace.setStatusTraced(tl.gen)
   596  	tl.eventWriter(traceGoBad, traceProcBad).commit(traceEvGoCreateSyscall, traceArg(gp.goid))
   597  }
   598  
   599  // GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
   600  //
   601  // Must not have a P.
   602  //
   603  // This occurs when Go code returns back to C. On pthread platforms it occurs only when
   604  // the C thread is destroyed.
   605  func (tl traceLocker) GoDestroySyscall() {
   606  	// N.B. If we trace a status here, we must never have a P, and we must be on a goroutine
   607  	// that is in the syscall state.
   608  	tl.eventWriter(traceGoSyscall, traceProcBad).commit(traceEvGoDestroySyscall)
   609  }
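
// For illustration, GoCreateSyscall and GoDestroySyscall bracket the lifetime of a
// C-created thread running Go code. With a hypothetical cgo export (the names here
// are not part of the runtime):
//
//	//export GoCallback
//	func GoCallback() {
//		// Runs on a goroutine created for the calling C thread.
//	}
//
// On pthread platforms, the first call to GoCallback from a C-created thread moves
// a fresh goroutine from dead into GoSyscall (GoCreateSyscall), and that goroutine
// is torn down again when the C thread is destroyed (GoDestroySyscall).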
   610  
   611  // To access runtime functions from runtime/trace.
   612  // See runtime/trace/annotation.go
   613  
   614  // trace_userTaskCreate emits a UserTaskCreate event.
   615  //
   616  //go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
   617  func trace_userTaskCreate(id, parentID uint64, taskType string) {
   618  	tl := traceAcquire()
   619  	if !tl.ok() {
   620  		// Need to do this check because the caller won't have it.
   621  		return
   622  	}
   623  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
   624  	traceRelease(tl)
   625  }
   626  
   627  // trace_userTaskEnd emits a UserTaskEnd event.
   628  //
   629  //go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
   630  func trace_userTaskEnd(id uint64) {
   631  	tl := traceAcquire()
   632  	if !tl.ok() {
   633  		// Need to do this check because the caller won't have it.
   634  		return
   635  	}
   636  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
   637  	traceRelease(tl)
   638  }
   639  
   640  // trace_userRegion emits a UserRegionBegin or UserRegionEnd event,
   641  // depending on mode (0 == Begin, 1 == End).
   642  //
   643  // TODO(mknyszek): Just make this two functions.
   644  //
   645  //go:linkname trace_userRegion runtime/trace.userRegion
   646  func trace_userRegion(id, mode uint64, name string) {
   647  	tl := traceAcquire()
   648  	if !tl.ok() {
   649  		// Need to do this check because the caller won't have it.
   650  		return
   651  	}
   652  	var ev traceEv
   653  	switch mode {
   654  	case 0:
   655  		ev = traceEvUserRegionBegin
   656  	case 1:
   657  		ev = traceEvUserRegionEnd
   658  	default:
   659  		return
   660  	}
   661  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(id), tl.string(name), tl.stack(3))
   662  	traceRelease(tl)
   663  }
   664  
   665  // trace_userLog emits a UserLog event.
   666  //
   667  //go:linkname trace_userLog runtime/trace.userLog
   668  func trace_userLog(id uint64, category, message string) {
   669  	tl := traceAcquire()
   670  	if !tl.ok() {
   671  		// Need to do this check because the caller won't have it.
   672  		return
   673  	}
   674  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
   675  	traceRelease(tl)
   676  }
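
// The four linknamed hooks above are what the public runtime/trace annotation API
// bottoms out in. A user-level sketch of what reaches them (illustrative only):
//
//	ctx, task := trace.NewTask(ctx, "myTask")  // -> trace_userTaskCreate
//	defer task.End()                           // -> trace_userTaskEnd
//	region := trace.StartRegion(ctx, "step")   // -> trace_userRegion (mode 0)
//	defer region.End()                         // -> trace_userRegion (mode 1)
//	trace.Log(ctx, "category", "message")      // -> trace_userLog
//
// Here trace refers to the runtime/trace package; see runtime/trace/annotation.go.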
   677  
   678  // traceProcFree is called when a P is destroyed.
   679  //
   680  // This must run on the system stack to match the old tracer.
   681  //
   682  //go:systemstack
   683  func traceProcFree(_ *p) {
   684  }
   685  
   686  // traceThreadDestroy is called when a thread is removed from
   687  // sched.freem.
   688  //
   689  // mp must not be able to emit trace events anymore.
   690  //
   691  // sched.lock must be held to synchronize with traceAdvance.
   692  func traceThreadDestroy(mp *m) {
   693  	assertLockHeld(&sched.lock)
   694  
   695  	// Flush all outstanding buffers to maintain the invariant
   696  	// that an M only has active buffers while on sched.freem
   697  	// or allm.
   698  	//
   699  	// Perform a traceAcquire/traceRelease on behalf of mp to
   700  	// synchronize with the tracer trying to flush our buffer
   701  	// as well.
   702  	seq := mp.trace.seqlock.Add(1)
   703  	if debugTraceReentrancy && seq%2 != 1 {
   704  		throw("bad use of trace.seqlock or tracer is reentrant")
   705  	}
   706  	systemstack(func() {
   707  		lock(&trace.lock)
   708  		for i := range mp.trace.buf {
   709  			if mp.trace.buf[i] != nil {
   710  				// N.B. traceBufFlush accepts a generation, but it
   711  				// really just cares about gen%2.
   712  				traceBufFlush(mp.trace.buf[i], uintptr(i))
   713  				mp.trace.buf[i] = nil
   714  			}
   715  		}
   716  		unlock(&trace.lock)
   717  	})
   718  	seq1 := mp.trace.seqlock.Add(1)
   719  	if seq1 != seq+1 {
   720  		print("runtime: seq1=", seq1, "\n")
   721  		throw("bad use of trace.seqlock")
   722  	}
   723  }
   724  
   725  // Not used in the new tracer; solely for compatibility with the old tracer.
   726  // nosplit because it's called from exitsyscall without a P.
   727  //
   728  //go:nosplit
   729  func (_ traceLocker) RecordSyscallExitedTime(_ *g, _ *p) {
   730  }