github.com/AESNooper/go/src@v0.0.0-20220218095104-b56a4ab1bbbb/runtime/trace.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
	traceEvHeapGoal          = 34 // gcController.heapGoal (formerly next_gc) change [timestamp, heap goal in bytes]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// An event is encoded in a byte, but only 6 bits are available for the
	// event type. The remaining 2 bits are used to specify the number of
	// arguments. That means the max event type value is 63.
)
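
// A minimal sketch (not part of the runtime) of how the first event byte
// packs these values; packEventHeader is a hypothetical helper:
//
//	// packEventHeader packs a 6-bit event type and a 2-bit inline
//	// argument count into a single byte, mirroring traceEventLocked.
//	func packEventHeader(ev, narg byte) byte {
//		return ev | narg<<6 // narg must be <= 3; 6 is traceArgCountShift
//	}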

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting to the end user.
	traceFutileWakeup byte = 128
)
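
// Worked example for traceBytesPerNumber: base-128 varints carry 7 payload
// bits per byte, so a uint64 needs at most ceil(64/7) = 10 bytes. Encoding
// 300 (binary 100101100) takes two:
//
//	traceAppend(nil, 300) // == []byte{0xac, 0x02}
//
// 0xac is 0x80|0x2c (the low 7 bits plus a continuation bit); 0x02 carries
// the remaining bits.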

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like data structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is the header of a per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is a per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for trace data
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and they are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a start is always
	// paired with an end).
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	// World is stopped, no need to lock.
	forEachGRace(func(gp *g) {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	})
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}
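
// Most clients drive tracing through the runtime/trace package rather than
// calling StartTrace directly; a minimal sketch of that path:
//
//	f, err := os.Create("trace.out")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	if err := trace.Start(f); err != nil {
//		log.Fatal(err)
//	}
//	defer trace.Stop()
//	// ... workload to be traced ...
//
// trace.Start calls StartTrace and then copies chunks from ReadTrace to f;
// trace.Stop calls StopTrace.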

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we'd rather not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which the race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
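
// A minimal sketch of the reader loop a client (such as runtime/trace)
// runs against ReadTrace; w is an assumed io.Writer:
//
//	go func() {
//		for {
//			data := runtime.ReadTrace()
//			if data == nil {
//				break // tracing stopped and all buffered data consumed
//			}
//			w.Write(data) // data must be consumed before the next call
//		}
//	}()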

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	// NOTE: ticks might be the same after tick division, even though the real
	// cputicks value grows monotonically.
	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if tickDiff == 0 {
		ticks = buf.lastTicks + 1
		tickDiff = 1
	}

	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for the number of arguments.
	// If the number is >= 3, then the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
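
// A worked example (illustrative, not code from this file) of the layout
// traceEventLocked produces. For traceEvGoUnblock (21), emitted with two
// explicit args [goroutine id, seq] plus a stack id, narg reaches 3, the
// value at which an explicit length byte follows the header:
//
//	byte 0: 21 | 3<<traceArgCountShift // event type in low 6 bits, narg in top 2
//	byte 1: payload length             // patched via lenp once known
//	then:   varint(tickDiff), varint(goid), varint(seq), varint(stack id)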

func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	if ticks == bufp.lastTicks {
		ticks = bufp.lastTicks + 1
	}
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// The memory allocation above may trigger tracing and
	// cause *bufp to change. The following code works with *bufp,
	// so there must be no memory allocation or any other activity
	// that causes tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check that the string and its length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
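
// A minimal decode sketch for the encoding above, assuming well-formed
// input; readVarint is a hypothetical helper, not part of this package:
//
//	// readVarint decodes one little-endian-base-128 value from buf,
//	// returning the value and the number of bytes consumed (0 if truncated).
//	func readVarint(buf []byte) (v uint64, n int) {
//		for shift := uint(0); n < len(buf); shift += 7 {
//			b := buf[n]
//			n++
//			v |= uint64(b&0x7f) << shift
//			if b < 0x80 {
//				return v, n
//			}
//		}
//		return 0, 0 // truncated input
//	}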

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}
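
// The same expansion works outside the runtime via the public API;
// a minimal sketch:
//
//	pcs := make([]uintptr, 32)
//	n := runtime.Callers(1, pcs) // 1 skips the frame for Callers itself
//	frames := runtime.CallersFrames(pcs[:n])
//	for {
//		f, more := frames.Next()
//		fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
//		if !more {
//			break
//		}
//	}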

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - goarch.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, goarch.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, gcController.heapLive)
}

func traceHeapGoal() {
	if heapGoal := atomic.Load64(&gcController.heapGoal); heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvHeapGoal, -1, 0)
	} else {
		traceEvent(traceEvHeapGoal, -1, heapGoal)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for val and len(val)
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
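
// A minimal sketch of the public runtime/trace annotation API that reaches
// the linknamed functions above (userTaskCreate, userRegion, userLog):
//
//	ctx, task := trace.NewTask(context.Background(), "makeCake")
//	defer task.End()
//	trace.WithRegion(ctx, "mixIngredients", func() { /* mix */ })
//	trace.Log(ctx, "flavor", "vanilla")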