github.com/flyinox/gosm@v0.0.0-20171117061539-16768cb62077/src/runtime/trace.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Go execution tracer.
     6  // The tracer captures a wide range of execution events like goroutine
     7  // creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
     8  // changes of heap size, processor start/stop, etc., and writes them to a
     9  // buffer in a compact form. A nanosecond-precision timestamp and a stack
    10  // trace are captured for most events.
    11  // See https://golang.org/s/go15trace for more info.
    12  
    13  package runtime
    14  
    15  import (
    16  	"runtime/internal/sys"
    17  	"unsafe"
    18  )
    19  
    20  // Event types in the trace; arguments are given in square brackets.
    21  const (
    22  	traceEvNone              = 0  // unused
    23  	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
    24  	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
    25  	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
    26  	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
    27  	traceEvProcStart         = 5  // start of P [timestamp, thread id]
    28  	traceEvProcStop          = 6  // stop of P [timestamp]
    29  	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
    30  	traceEvGCDone            = 8  // GC done [timestamp]
    31  	traceEvGCScanStart       = 9  // GC mark termination start [timestamp]
    32  	traceEvGCScanDone        = 10 // GC mark termination done [timestamp]
    33  	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
    34  	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
    35  	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
    36  	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
    37  	traceEvGoEnd             = 15 // goroutine ends [timestamp]
    38  	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
    39  	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
    40  	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
    41  	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
    42  	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
    43  	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
    44  	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
    45  	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
    46  	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
    47  	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
    48  	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
    49  	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
    50  	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
    51  	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
    52  	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
    53  	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
    54  	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
    55  	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
    56  	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
    57  	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
    58  	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
    59  	traceEvString            = 37 // string dictionary entry [ID, length, string]
    60  	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
    61  	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
    62  	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
    63  	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
    64  	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
    65  	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
    66  	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
    67  	traceEvCount             = 45
    68  )
    69  
    70  const (
    71  	// Timestamps in the trace are cputicks/traceTickDiv.
    72  	// This makes absolute values of timestamp diffs smaller,
    73  	// and so they are encoded in fewer bytes.
    74  	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
    75  	// The suggested increment frequency for PowerPC's time base register is
    76  	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
    77  	// and ppc64le.
    78  	// Tracing won't work reliably for architectures where cputicks is emulated
    79  	// by nanotime, so the value doesn't matter for those architectures.
    80  	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
    81  	// Maximum number of PCs in a single stack trace.
    82  	// Since events contain only stack id rather than whole stack trace,
    83  	// we can allow quite large values here.
    84  	traceStackSize = 128
    85  	// Identifier of a fake P that is used when we trace without a real P.
    86  	traceGlobProc = -1
    87  	// Maximum number of bytes to encode uint64 in base-128.
    88  	traceBytesPerNumber = 10
    89  	// Shift of the number of arguments in the first event byte.
    90  	traceArgCountShift = 6
    91  	// Flag passed to traceGoPark to denote that the previous wakeup of this
    92  	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
    93  	// but another goroutine got ahead and acquired the mutex before the first
    94  	// goroutine was scheduled, so the first goroutine had to block again.
    95  	// Such wakeups happen on buffered channels and sync.Mutex,
    96  	// but are generally not interesting to the end user.
    97  	traceFutileWakeup byte = 128
    98  )
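
        // Layout sketch of the first byte of an event, derived from the constants
        // above: the low 6 bits hold the event type and the top 2 bits hold the
        // argument count, where 3 means "3 or more" and an explicit length byte
        // follows the event byte. For example, traceEvGoStart (14) with three or
        // more arguments encodes as
        //
        //	14 | 3<<traceArgCountShift == 14 | 3<<6 == 0xce
        //
        // traceFutileWakeup (128) occupies bit 7, above the 6-bit event type
        // space, so callers can OR it into the traceEv parameter passed to
        // traceGoPark; it is masked back out before the event is written
        // (see traceGoPark below).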
    99  
   100  // trace is global tracing context.
   101  var trace struct {
   102  	lock          mutex       // protects the following members
   103  	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
   104  	enabled       bool        // when set runtime traces events
   105  	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
   106  	headerWritten bool        // whether ReadTrace has emitted trace header
   107  	footerWritten bool        // whether ReadTrace has emitted trace footer
   108  	shutdownSema  uint32      // used to wait for ReadTrace completion
   109  	seqStart      uint64      // sequence number when tracing was started
   110  	ticksStart    int64       // cputicks when tracing was started
   111  	ticksEnd      int64       // cputicks when tracing was stopped
   112  	timeStart     int64       // nanotime when tracing was started
   113  	timeEnd       int64       // nanotime when tracing was stopped
   114  	seqGC         uint64      // GC start/done sequencer
   115  	reading       traceBufPtr // buffer currently handed off to user
   116  	empty         traceBufPtr // stack of empty buffers
   117  	fullHead      traceBufPtr // queue of full buffers
   118  	fullTail      traceBufPtr
   119  	reader        guintptr        // goroutine that called ReadTrace, or nil
   120  	stackTab      traceStackTable // maps stack traces to unique ids
   121  
   122  	// Dictionary for traceEvString.
   123  	//
   124  	// Currently this is used only at trace setup and for
   125  	// func/file:line info after the tracing session, so we assume
   126  	// single-threaded access.
   127  	strings   map[string]uint64
   128  	stringSeq uint64
   129  
   130  	// markWorkerLabels maps gcMarkWorkerMode to string ID.
   131  	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64
   132  
   133  	bufLock mutex       // protects buf
   134  	buf     traceBufPtr // global trace buffer, used when running without a p
   135  }
   136  
   137  // traceBufHeader is the header of a per-P tracing buffer.
   138  type traceBufHeader struct {
   139  	link      traceBufPtr             // in trace.empty/full
   140  	lastTicks uint64                  // when we wrote the last event
   141  	pos       int                     // next write offset in arr
   142  	stk       [traceStackSize]uintptr // scratch buffer for traceback
   143  }
   144  
   145  // traceBuf is per-P tracing buffer.
   146  //
   147  //go:notinheap
   148  type traceBuf struct {
   149  	traceBufHeader
   150  	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for trace event data, written at traceBufHeader.pos
   151  }
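
        // Sizing note: arr is declared as 64<<10 minus the header size, so
        // unsafe.Sizeof(traceBuf{}) works out to exactly 64KB, which is the size
        // traceFlush passes to sysAlloc and StopTrace passes to sysFree.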
   152  
   153  // traceBufPtr is a *traceBuf that is not traced by the garbage
   154  // collector and doesn't have write barriers. traceBufs are not
   155  // allocated from the GC'd heap, so this is safe; they are often
   156  // manipulated in contexts where write barriers are not allowed, so
   157  // this is necessary.
   158  //
   159  // TODO: Since traceBuf is now go:notinheap, this isn't necessary.
   160  type traceBufPtr uintptr
   161  
   162  func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
   163  func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
   164  func traceBufPtrOf(b *traceBuf) traceBufPtr {
   165  	return traceBufPtr(unsafe.Pointer(b))
   166  }
   167  
   168  // StartTrace enables tracing for the current process.
   169  // While tracing, the data will be buffered and available via ReadTrace.
   170  // StartTrace returns an error if tracing is already enabled.
   171  // Most clients should use the runtime/trace package or the testing package's
   172  // -test.trace flag instead of calling StartTrace directly.
   173  func StartTrace() error {
   174  	// Stop the world, so that we can take a consistent snapshot
   175  	// of all goroutines at the beginning of the trace.
   176  	stopTheWorld("start tracing")
   177  
   178  	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
   179  	// Exitsyscall could check trace.enabled long before and then suddenly wake up
   180  	// and decide to write to trace at a random point in time.
   181  	// However, such a syscall will use the global trace.buf buffer, because we've
   182  	// acquired all p's by doing stop-the-world. So this protects us from such races.
   183  	lock(&trace.bufLock)
   184  
   185  	if trace.enabled || trace.shutdown {
   186  		unlock(&trace.bufLock)
   187  		startTheWorld()
   188  		return errorString("tracing is already enabled")
   189  	}
   190  
   191  	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
   192  	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
   193  	// That would lead to an inconsistent trace:
   194  	// - either GoSysExit appears before EvGoInSyscall,
   195  	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
   196  	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
   197  	// trace.enabled is set afterwards once we have emitted all preliminary events.
   198  	_g_ := getg()
   199  	_g_.m.startingtrace = true
   200  
   201  	// Obtain current stack ID to use in all traceEvGoCreate events below.
   202  	mp := acquirem()
   203  	stkBuf := make([]uintptr, traceStackSize)
   204  	stackID := traceStackID(mp, stkBuf, 2)
   205  	releasem(mp)
   206  
   207  	for _, gp := range allgs {
   208  		status := readgstatus(gp)
   209  		if status != _Gdead {
   210  			gp.traceseq = 0
   211  			gp.tracelastp = getg().m.p
   212  			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
   213  			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
   214  			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
   215  		}
   216  		if status == _Gwaiting {
   217  			// traceEvGoWaiting is implied to have seq=1.
   218  			gp.traceseq++
   219  			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
   220  		}
   221  		if status == _Gsyscall {
   222  			gp.traceseq++
   223  			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
   224  		} else {
   225  			gp.sysblocktraced = false
   226  		}
   227  	}
   228  	traceProcStart()
   229  	traceGoStart()
   230  	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
   231  	// If we do it the other way around, it is possible that exitsyscall will
   232  	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
   233  	// That would lead to the false conclusion that cputicks is broken.
   234  	trace.ticksStart = cputicks()
   235  	trace.timeStart = nanotime()
   236  	trace.headerWritten = false
   237  	trace.footerWritten = false
   238  	trace.strings = make(map[string]uint64)
   239  	trace.stringSeq = 0
   240  	trace.seqGC = 0
   241  	_g_.m.startingtrace = false
   242  	trace.enabled = true
   243  
   244  	// Register runtime goroutine labels.
   245  	_, pid, bufp := traceAcquireBuffer()
   246  	buf := (*bufp).ptr()
   247  	if buf == nil {
   248  		buf = traceFlush(0).ptr()
   249  		(*bufp).set(buf)
   250  	}
   251  	for i, label := range gcMarkWorkerModeStrings[:] {
   252  		trace.markWorkerLabels[i], buf = traceString(buf, label)
   253  	}
   254  	traceReleaseBuffer(pid)
   255  
   256  	unlock(&trace.bufLock)
   257  
   258  	startTheWorld()
   259  	return nil
   260  }
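
        // Usage sketch (illustrative, not part of this file; assumes the usual
        // os, log, and runtime/trace imports): the runtime/trace package wraps
        // StartTrace/ReadTrace/StopTrace for clients, roughly:
        //
        //	f, err := os.Create("trace.out")
        //	if err != nil {
        //		log.Fatal(err)
        //	}
        //	if err := trace.Start(f); err != nil {
        //		log.Fatal(err)
        //	}
        //	defer trace.Stop()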
   261  
   262  // StopTrace stops tracing, if it was previously enabled.
   263  // StopTrace only returns after all the reads for the trace have completed.
   264  func StopTrace() {
   265  	// Stop the world so that we can collect the trace buffers from all p's below,
   266  	// and also to avoid races with traceEvent.
   267  	stopTheWorld("stop tracing")
   268  
   269  	// See the comment in StartTrace.
   270  	lock(&trace.bufLock)
   271  
   272  	if !trace.enabled {
   273  		unlock(&trace.bufLock)
   274  		startTheWorld()
   275  		return
   276  	}
   277  
   278  	traceGoSched()
   279  
   280  	for _, p := range &allp {
   281  		if p == nil {
   282  			break
   283  		}
   284  		buf := p.tracebuf
   285  		if buf != 0 {
   286  			traceFullQueue(buf)
   287  			p.tracebuf = 0
   288  		}
   289  	}
   290  	if trace.buf != 0 {
   291  		buf := trace.buf
   292  		trace.buf = 0
   293  		if buf.ptr().pos != 0 {
   294  			traceFullQueue(buf)
   295  		}
   296  	}
   297  
   298  	for {
   299  		trace.ticksEnd = cputicks()
   300  		trace.timeEnd = nanotime()
   301  		// Windows time can tick only every 15ms; wait for at least one tick.
   302  		if trace.timeEnd != trace.timeStart {
   303  			break
   304  		}
   305  		osyield()
   306  	}
   307  
   308  	trace.enabled = false
   309  	trace.shutdown = true
   310  	unlock(&trace.bufLock)
   311  
   312  	startTheWorld()
   313  
   314  	// The world is started but we've set trace.shutdown, so new tracing can't start.
   315  	// Wait for the trace reader to flush pending buffers and stop.
   316  	semacquire(&trace.shutdownSema)
   317  	if raceenabled {
   318  		raceacquire(unsafe.Pointer(&trace.shutdownSema))
   319  	}
   320  
   321  	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
   322  	lock(&trace.lock)
   323  	for _, p := range &allp {
   324  		if p == nil {
   325  			break
   326  		}
   327  		if p.tracebuf != 0 {
   328  			throw("trace: non-empty trace buffer in proc")
   329  		}
   330  	}
   331  	if trace.buf != 0 {
   332  		throw("trace: non-empty global trace buffer")
   333  	}
   334  	if trace.fullHead != 0 || trace.fullTail != 0 {
   335  		throw("trace: non-empty full trace buffer")
   336  	}
   337  	if trace.reading != 0 || trace.reader != 0 {
   338  		throw("trace: reading after shutdown")
   339  	}
   340  	for trace.empty != 0 {
   341  		buf := trace.empty
   342  		trace.empty = buf.ptr().link
   343  		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
   344  	}
   345  	trace.strings = nil
   346  	trace.shutdown = false
   347  	unlock(&trace.lock)
   348  }
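
        // Shutdown handshake: StopTrace queues every remaining buffer, sets
        // trace.shutdown, and then blocks in semacquire(&trace.shutdownSema);
        // ReadTrace drains the full-buffer queue, observes shutdown, and
        // releases the semaphore, after which the sanity checks above run.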
   349  
   350  // ReadTrace returns the next chunk of binary tracing data, blocking until data
   351  // is available. If tracing is turned off and all the data accumulated while it
   352  // was on has been returned, ReadTrace returns nil. The caller must copy the
   353  // returned data before calling ReadTrace again.
   354  // ReadTrace must be called from one goroutine at a time.
   355  func ReadTrace() []byte {
   356  	// This function may need to lock trace.lock recursively
   357  	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
   358  	// To allow this we use trace.lockOwner.
   359  	// Also this function must not allocate while holding trace.lock:
   360  	// allocation can enter the heap allocator, which will try to emit a trace
   361  	// event while holding the heap lock.
   362  	lock(&trace.lock)
   363  	trace.lockOwner = getg()
   364  
   365  	if trace.reader != 0 {
   366  		// More than one goroutine reads trace. This is bad.
   367  		// But we would rather not crash the program because of tracing,
   368  		// since tracing can be enabled at runtime on production servers.
   369  		trace.lockOwner = nil
   370  		unlock(&trace.lock)
   371  		println("runtime: ReadTrace called from multiple goroutines simultaneously")
   372  		return nil
   373  	}
   374  	// Recycle the old buffer.
   375  	if buf := trace.reading; buf != 0 {
   376  		buf.ptr().link = trace.empty
   377  		trace.empty = buf
   378  		trace.reading = 0
   379  	}
   380  	// Write trace header.
   381  	if !trace.headerWritten {
   382  		trace.headerWritten = true
   383  		trace.lockOwner = nil
   384  		unlock(&trace.lock)
   385  		return []byte("go 1.9 trace\x00\x00\x00\x00")
   386  	}
   387  	// Wait for new data.
   388  	if trace.fullHead == 0 && !trace.shutdown {
   389  		trace.reader.set(getg())
   390  		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
   391  		lock(&trace.lock)
   392  	}
   393  	// Write a buffer.
   394  	if trace.fullHead != 0 {
   395  		buf := traceFullDequeue()
   396  		trace.reading = buf
   397  		trace.lockOwner = nil
   398  		unlock(&trace.lock)
   399  		return buf.ptr().arr[:buf.ptr().pos]
   400  	}
   401  	// Write footer with timer frequency.
   402  	if !trace.footerWritten {
   403  		trace.footerWritten = true
   404  		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
   405  		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
   406  		trace.lockOwner = nil
   407  		unlock(&trace.lock)
   408  		var data []byte
   409  		data = append(data, traceEvFrequency|0<<traceArgCountShift)
   410  		data = traceAppend(data, uint64(freq))
   411  		if timers.gp != nil {
   412  			data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
   413  			data = traceAppend(data, uint64(timers.gp.goid))
   414  		}
   415  		// This will emit a bunch of full buffers; we will pick them up
   416  		// on the following calls to ReadTrace.
   417  		trace.stackTab.dump()
   418  		return data
   419  	}
   420  	// Done.
   421  	if trace.shutdown {
   422  		trace.lockOwner = nil
   423  		unlock(&trace.lock)
   424  		if raceenabled {
   425  			// Model synchronization on trace.shutdownSema, which race
   426  			// detector does not see. This is required to avoid false
   427  			// race reports on writer passed to trace.Start.
   428  			racerelease(unsafe.Pointer(&trace.shutdownSema))
   429  		}
   430  		// trace.enabled is already reset, so can call traceable functions.
   431  		semrelease(&trace.shutdownSema)
   432  		return nil
   433  	}
   434  	// Also bad, but see the comment above.
   435  	trace.lockOwner = nil
   436  	unlock(&trace.lock)
   437  	println("runtime: spurious wakeup of trace reader")
   438  	return nil
   439  }
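
        // Reader-loop sketch: runtime/trace.Start drains this function from a
        // single goroutine in approximately this shape, consuming each chunk
        // before the next call as the doc comment requires:
        //
        //	go func() {
        //		for {
        //			data := runtime.ReadTrace()
        //			if data == nil {
        //				break
        //			}
        //			w.Write(data) // w is the io.Writer passed to trace.Start
        //		}
        //	}()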
   440  
   441  // traceReader returns the trace reader that should be woken up, if any.
   442  func traceReader() *g {
   443  	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
   444  		return nil
   445  	}
   446  	lock(&trace.lock)
   447  	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
   448  		unlock(&trace.lock)
   449  		return nil
   450  	}
   451  	gp := trace.reader.ptr()
   452  	trace.reader.set(nil)
   453  	unlock(&trace.lock)
   454  	return gp
   455  }
   456  
   457  // traceProcFree frees trace buffer associated with pp.
   458  func traceProcFree(pp *p) {
   459  	buf := pp.tracebuf
   460  	pp.tracebuf = 0
   461  	if buf == 0 {
   462  		return
   463  	}
   464  	lock(&trace.lock)
   465  	traceFullQueue(buf)
   466  	unlock(&trace.lock)
   467  }
   468  
   469  // traceFullQueue queues buf into queue of full buffers.
   470  func traceFullQueue(buf traceBufPtr) {
   471  	buf.ptr().link = 0
   472  	if trace.fullHead == 0 {
   473  		trace.fullHead = buf
   474  	} else {
   475  		trace.fullTail.ptr().link = buf
   476  	}
   477  	trace.fullTail = buf
   478  }
   479  
   480  // traceFullDequeue dequeues from queue of full buffers.
   481  func traceFullDequeue() traceBufPtr {
   482  	buf := trace.fullHead
   483  	if buf == 0 {
   484  		return 0
   485  	}
   486  	trace.fullHead = buf.ptr().link
   487  	if trace.fullHead == 0 {
   488  		trace.fullTail = 0
   489  	}
   490  	buf.ptr().link = 0
   491  	return buf
   492  }
   493  
   494  // traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
   495  // ev is event type.
   496  // If skip > 0, write current stack id as the last argument (skipping skip top frames).
   497  // If skip = 0, this event type should contain a stack, but we don't want
   498  // to collect and remember it for this particular call.
   499  func traceEvent(ev byte, skip int, args ...uint64) {
   500  	mp, pid, bufp := traceAcquireBuffer()
   501  	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
   502  	// This protects from races between traceEvent and StartTrace/StopTrace.
   503  
   504  	// The caller checked that trace.enabled == true, but trace.enabled might have been
   505  	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
   506  	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
   507  	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
   508  	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
   509  	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
   510  	if !trace.enabled && !mp.startingtrace {
   511  		traceReleaseBuffer(pid)
   512  		return
   513  	}
   514  	buf := (*bufp).ptr()
   515  	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two additional params
   516  	if buf == nil || len(buf.arr)-buf.pos < maxSize {
   517  		buf = traceFlush(traceBufPtrOf(buf)).ptr()
   518  		(*bufp).set(buf)
   519  	}
   520  
   521  	ticks := uint64(cputicks()) / traceTickDiv
   522  	tickDiff := ticks - buf.lastTicks
   523  	if buf.pos == 0 {
   524  		buf.byte(traceEvBatch | 1<<traceArgCountShift)
   525  		buf.varint(uint64(pid))
   526  		buf.varint(ticks)
   527  		tickDiff = 0
   528  	}
   529  	buf.lastTicks = ticks
   530  	narg := byte(len(args))
   531  	if skip >= 0 {
   532  		narg++
   533  	}
   534  	// We have only 2 bits for the number of arguments.
   535  	// If the number is >= 3, then the event type is followed by the event length in bytes.
   536  	if narg > 3 {
   537  		narg = 3
   538  	}
   539  	startPos := buf.pos
   540  	buf.byte(ev | narg<<traceArgCountShift)
   541  	var lenp *byte
   542  	if narg == 3 {
   543  		// Reserve the byte for length assuming that length < 128.
   544  		buf.varint(0)
   545  		lenp = &buf.arr[buf.pos-1]
   546  	}
   547  	buf.varint(tickDiff)
   548  	for _, a := range args {
   549  		buf.varint(a)
   550  	}
   551  	if skip == 0 {
   552  		buf.varint(0)
   553  	} else if skip > 0 {
   554  		buf.varint(traceStackID(mp, buf.stk[:], skip))
   555  	}
   556  	evSize := buf.pos - startPos
   557  	if evSize > maxSize {
   558  		throw("invalid length of trace event")
   559  	}
   560  	if lenp != nil {
   561  		// Fill in actual length.
   562  		*lenp = byte(evSize - 2)
   563  	}
   564  	traceReleaseBuffer(pid)
   565  }
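
        // Worked example (illustrative values, assuming an empty buffer, pid 1,
        // and ticks (cputicks/traceTickDiv) of 1000): traceEvent(traceEvProcStop, -1)
        // first opens a batch, then appends the event itself:
        //
        //	0x41        traceEvBatch | 1<<traceArgCountShift (one argument)
        //	0x01        varint(pid = 1)
        //	0xe8 0x07   varint(ticks = 1000)
        //	0x06        traceEvProcStop | 0<<traceArgCountShift (no args, no stack)
        //	0x00        varint(tickDiff = 0, reset for the first event in a batch)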
   566  
   567  func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
   568  	_g_ := getg()
   569  	gp := mp.curg
   570  	var nstk int
   571  	if gp == _g_ {
   572  		nstk = callers(skip+1, buf[:])
   573  	} else if gp != nil {
   574  		gp = mp.curg
   575  		nstk = gcallers(gp, skip, buf[:])
   576  	}
   577  	if nstk > 0 {
   578  		nstk-- // skip runtime.goexit
   579  	}
   580  	if nstk > 0 && gp.goid == 1 {
   581  		nstk-- // skip runtime.main
   582  	}
   583  	id := trace.stackTab.put(buf[:nstk])
   584  	return uint64(id)
   585  }
   586  
   587  // traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
   588  func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
   589  	mp = acquirem()
   590  	if p := mp.p.ptr(); p != nil {
   591  		return mp, p.id, &p.tracebuf
   592  	}
   593  	lock(&trace.bufLock)
   594  	return mp, traceGlobProc, &trace.buf
   595  }
   596  
   597  // traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
   598  func traceReleaseBuffer(pid int32) {
   599  	if pid == traceGlobProc {
   600  		unlock(&trace.bufLock)
   601  	}
   602  	releasem(getg().m)
   603  }
   604  
   605  // traceFlush puts buf onto the queue of full buffers and returns an empty buffer.
   606  func traceFlush(buf traceBufPtr) traceBufPtr {
   607  	owner := trace.lockOwner
   608  	dolock := owner == nil || owner != getg().m.curg
   609  	if dolock {
   610  		lock(&trace.lock)
   611  	}
   612  	if buf != 0 {
   613  		traceFullQueue(buf)
   614  	}
   615  	if trace.empty != 0 {
   616  		buf = trace.empty
   617  		trace.empty = buf.ptr().link
   618  	} else {
   619  		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
   620  		if buf == 0 {
   621  			throw("trace: out of memory")
   622  		}
   623  	}
   624  	bufp := buf.ptr()
   625  	bufp.link.set(nil)
   626  	bufp.pos = 0
   627  	bufp.lastTicks = 0
   628  	if dolock {
   629  		unlock(&trace.lock)
   630  	}
   631  	return buf
   632  }
   633  
   634  func traceString(buf *traceBuf, s string) (uint64, *traceBuf) {
   635  	if s == "" {
   636  		return 0, buf
   637  	}
   638  	if id, ok := trace.strings[s]; ok {
   639  		return id, buf
   640  	}
   641  
   642  	trace.stringSeq++
   643  	id := trace.stringSeq
   644  	trace.strings[s] = id
   645  
   646  	size := 1 + 2*traceBytesPerNumber + len(s)
   647  	if len(buf.arr)-buf.pos < size {
   648  		buf = traceFlush(traceBufPtrOf(buf)).ptr()
   649  	}
   650  	buf.byte(traceEvString)
   651  	buf.varint(id)
   652  	buf.varint(uint64(len(s)))
   653  	buf.pos += copy(buf.arr[buf.pos:], s)
   654  	return id, buf
   655  }
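
        // Wire sketch: the first traceString(buf, "GC") with trace.stringSeq == 0
        // registers id 1 and appends
        //
        //	0x25        traceEvString (37)
        //	0x01        varint(id = 1)
        //	0x02        varint(len("GC") = 2)
        //	'G' 'C'     raw string bytes
        //
        // Later lookups of "GC" hit the trace.strings map and emit nothing.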
   656  
   657  // traceAppend appends v to buf in little-endian-base-128 encoding.
   658  func traceAppend(buf []byte, v uint64) []byte {
   659  	for ; v >= 0x80; v >>= 7 {
   660  		buf = append(buf, 0x80|byte(v))
   661  	}
   662  	buf = append(buf, byte(v))
   663  	return buf
   664  }
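
        // For example, traceAppend(nil, 300) yields [0xac 0x02]:
        // 300 & 0x7f == 0x2c, OR the 0x80 continuation bit -> 0xac; 300 >> 7 == 2 -> 0x02.
        // A matching decoder sketch (hypothetical helper, not part of the runtime):
        //
        //	func readVarint(p []byte) (v uint64, n int) {
        //		for shift := uint(0); ; shift += 7 {
        //			b := p[n]
        //			n++
        //			v |= uint64(b&0x7f) << shift
        //			if b < 0x80 {
        //				return v, n
        //			}
        //		}
        //	}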
   665  
   666  // varint appends v to buf in little-endian-base-128 encoding.
   667  func (buf *traceBuf) varint(v uint64) {
   668  	pos := buf.pos
   669  	for ; v >= 0x80; v >>= 7 {
   670  		buf.arr[pos] = 0x80 | byte(v)
   671  		pos++
   672  	}
   673  	buf.arr[pos] = byte(v)
   674  	pos++
   675  	buf.pos = pos
   676  }
   677  
   678  // byte appends v to buf.
   679  func (buf *traceBuf) byte(v byte) {
   680  	buf.arr[buf.pos] = v
   681  	buf.pos++
   682  }
   683  
   684  // traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
   685  // It is lock-free for reading.
   686  type traceStackTable struct {
   687  	lock mutex
   688  	seq  uint32
   689  	mem  traceAlloc
   690  	tab  [1 << 13]traceStackPtr
   691  }
   692  
   693  // traceStack is a single stack in traceStackTable.
   694  type traceStack struct {
   695  	link traceStackPtr
   696  	hash uintptr
   697  	id   uint32
   698  	n    int
   699  	stk  [0]uintptr // real type [n]uintptr
   700  }
   701  
   702  type traceStackPtr uintptr
   703  
   704  func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
   705  
   706  // stack returns slice of PCs.
   707  func (ts *traceStack) stack() []uintptr {
   708  	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
   709  }
   710  
   711  // put returns a unique id for the stack trace pcs and caches it in the table
   712  // if it sees the trace for the first time.
   713  func (tab *traceStackTable) put(pcs []uintptr) uint32 {
   714  	if len(pcs) == 0 {
   715  		return 0
   716  	}
   717  	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
   718  	// First, search the hashtable w/o the mutex.
   719  	if id := tab.find(pcs, hash); id != 0 {
   720  		return id
   721  	}
   722  	// Now, double-check under the mutex.
   723  	lock(&tab.lock)
   724  	if id := tab.find(pcs, hash); id != 0 {
   725  		unlock(&tab.lock)
   726  		return id
   727  	}
   728  	// Create new record.
   729  	tab.seq++
   730  	stk := tab.newStack(len(pcs))
   731  	stk.hash = hash
   732  	stk.id = tab.seq
   733  	stk.n = len(pcs)
   734  	stkpc := stk.stack()
   735  	for i, pc := range pcs {
   736  		stkpc[i] = pc
   737  	}
   738  	part := int(hash % uintptr(len(tab.tab)))
   739  	stk.link = tab.tab[part]
   740  	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
   741  	unlock(&tab.lock)
   742  	return stk.id
   743  }
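
        // Publication note: put first calls find without holding tab.lock, so a
        // new traceStack must be fully initialized (hash, id, n, PCs) before it
        // is linked into its bucket; the atomicstorep above is what makes the
        // completed record visible to lock-free readers.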
   744  
   745  // find checks if the stack trace pcs is already present in the table.
   746  func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
   747  	part := int(hash % uintptr(len(tab.tab)))
   748  Search:
   749  	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
   750  		if stk.hash == hash && stk.n == len(pcs) {
   751  			for i, stkpc := range stk.stack() {
   752  				if stkpc != pcs[i] {
   753  					continue Search
   754  				}
   755  			}
   756  			return stk.id
   757  		}
   758  	}
   759  	return 0
   760  }
   761  
   762  // newStack allocates a new stack of size n.
   763  func (tab *traceStackTable) newStack(n int) *traceStack {
   764  	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
   765  }
   766  
   767  // allFrames returns all of the Frames corresponding to pcs.
   768  func allFrames(pcs []uintptr) []Frame {
   769  	frames := make([]Frame, 0, len(pcs))
   770  	ci := CallersFrames(pcs)
   771  	for {
   772  		f, more := ci.Next()
   773  		frames = append(frames, f)
   774  		if !more {
   775  			return frames
   776  		}
   777  	}
   778  }
   779  
   780  // dump writes all previously cached stacks to trace buffers,
   781  // releases all memory and resets state.
   782  func (tab *traceStackTable) dump() {
   783  	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
   784  	buf := traceFlush(0).ptr()
   785  	for _, stk := range tab.tab {
   786  		stk := stk.ptr()
   787  		for ; stk != nil; stk = stk.link.ptr() {
   788  			tmpbuf := tmp[:0]
   789  			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
   790  			frames := allFrames(stk.stack())
   791  			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
   792  			for _, f := range frames {
   793  				var frame traceFrame
   794  				frame, buf = traceFrameForPC(buf, f)
   795  				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
   796  				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
   797  				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
   798  				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
   799  			}
   800  			// Now copy to the buffer.
   801  			size := 1 + traceBytesPerNumber + len(tmpbuf)
   802  			if len(buf.arr)-buf.pos < size {
   803  				buf = traceFlush(traceBufPtrOf(buf)).ptr()
   804  			}
   805  			buf.byte(traceEvStack | 3<<traceArgCountShift)
   806  			buf.varint(uint64(len(tmpbuf)))
   807  			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
   808  		}
   809  	}
   810  
   811  	lock(&trace.lock)
   812  	traceFullQueue(traceBufPtrOf(buf))
   813  	unlock(&trace.lock)
   814  
   815  	tab.mem.drop()
   816  	*tab = traceStackTable{}
   817  }
   818  
   819  type traceFrame struct {
   820  	funcID uint64
   821  	fileID uint64
   822  	line   uint64
   823  }
   824  
   825  func traceFrameForPC(buf *traceBuf, f Frame) (traceFrame, *traceBuf) {
   826  	var frame traceFrame
   827  
   828  	fn := f.Function
   829  	const maxLen = 1 << 10
   830  	if len(fn) > maxLen {
   831  		fn = fn[len(fn)-maxLen:]
   832  	}
   833  	frame.funcID, buf = traceString(buf, fn)
   834  	frame.line = uint64(f.Line)
   835  	file := f.File
   836  	if len(file) > maxLen {
   837  		file = file[len(file)-maxLen:]
   838  	}
   839  	frame.fileID, buf = traceString(buf, file)
   840  	return frame, buf
   841  }
   842  
   843  // traceAlloc is a non-thread-safe region allocator.
   844  // It holds a linked list of traceAllocBlock.
   845  type traceAlloc struct {
   846  	head traceAllocBlockPtr
   847  	off  uintptr
   848  }
   849  
   850  // traceAllocBlock is a block in traceAlloc.
   851  //
   852  // traceAllocBlock is allocated from non-GC'd memory, so it must not
   853  // contain heap pointers. Writes to pointers to traceAllocBlocks do
   854  // not need write barriers.
   855  //
   856  //go:notinheap
   857  type traceAllocBlock struct {
   858  	next traceAllocBlockPtr
   859  	data [64<<10 - sys.PtrSize]byte
   860  }
   861  
   862  // TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
   863  type traceAllocBlockPtr uintptr
   864  
   865  func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
   866  func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
   867  
   868  // alloc allocates n-byte block.
   869  func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
   870  	n = round(n, sys.PtrSize)
   871  	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
   872  		if n > uintptr(len(a.head.ptr().data)) {
   873  			throw("trace: alloc too large")
   874  		}
   875  		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
   876  		if block == nil {
   877  			throw("trace: out of memory")
   878  		}
   879  		block.next.set(a.head.ptr())
   880  		a.head.set(block)
   881  		a.off = 0
   882  	}
   883  	p := &a.head.ptr().data[a.off]
   884  	a.off += n
   885  	return unsafe.Pointer(p)
   886  }
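
        // For example, with sys.PtrSize == 8 a call alloc(1) is rounded up to 8
        // bytes, so successive small allocations stay pointer-aligned within the
        // current block's data array (64KB minus one pointer-sized word, so the
        // whole traceAllocBlock is exactly 64KB).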
   887  
   888  // drop frees all previously allocated memory and resets the allocator.
   889  func (a *traceAlloc) drop() {
   890  	for a.head != 0 {
   891  		block := a.head.ptr()
   892  		a.head.set(block.next.ptr())
   893  		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
   894  	}
   895  }
   896  
   897  // The following functions write specific events to trace.
   898  
   899  // The following functions write specific events to the trace.
   900  	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
   901  }
   902  
   903  func traceProcStart() {
   904  	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
   905  }
   906  
   907  func traceProcStop(pp *p) {
   908  	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
   909  	// to handle this we temporarily employ the P.
   910  	mp := acquirem()
   911  	oldp := mp.p
   912  	mp.p.set(pp)
   913  	traceEvent(traceEvProcStop, -1)
   914  	mp.p = oldp
   915  	releasem(mp)
   916  }
   917  
   918  func traceGCStart() {
   919  	traceEvent(traceEvGCStart, 3, trace.seqGC)
   920  	trace.seqGC++
   921  }
   922  
   923  func traceGCDone() {
   924  	traceEvent(traceEvGCDone, -1)
   925  }
   926  
   927  func traceGCScanStart() {
   928  	traceEvent(traceEvGCScanStart, -1)
   929  }
   930  
   931  func traceGCScanDone() {
   932  	traceEvent(traceEvGCScanDone, -1)
   933  }
   934  
   935  // traceGCSweepStart prepares to trace a sweep loop. This does not
   936  // emit any events until traceGCSweepSpan is called.
   937  //
   938  // traceGCSweepStart must be paired with traceGCSweepDone and there
   939  // must be no preemption points between these two calls.
   940  func traceGCSweepStart() {
   941  	// Delay the actual GCSweepStart event until the first span
   942  	// sweep. If we don't sweep anything, don't emit any events.
   943  	_p_ := getg().m.p.ptr()
   944  	if _p_.traceSweep {
   945  		throw("double traceGCSweepStart")
   946  	}
   947  	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
   948  }
   949  
   950  // traceGCSweepSpan traces the sweep of a single page.
   951  //
   952  // This may be called outside a traceGCSweepStart/traceGCSweepDone
   953  // pair; however, it will not emit any trace events in this case.
   954  func traceGCSweepSpan(bytesSwept uintptr) {
   955  	_p_ := getg().m.p.ptr()
   956  	if _p_.traceSweep {
   957  		if _p_.traceSwept == 0 {
   958  			traceEvent(traceEvGCSweepStart, 1)
   959  		}
   960  		_p_.traceSwept += bytesSwept
   961  	}
   962  }
   963  
   964  func traceGCSweepDone() {
   965  	_p_ := getg().m.p.ptr()
   966  	if !_p_.traceSweep {
   967  		throw("missing traceGCSweepStart")
   968  	}
   969  	if _p_.traceSwept != 0 {
   970  		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
   971  	}
   972  	_p_.traceSweep = false
   973  }
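
        // Pairing sketch for the three sweep hooks above (caller-side shape with
        // a hypothetical spanBytes slice, not a literal excerpt from the sweeper):
        //
        //	traceGCSweepStart()
        //	for _, n := range spanBytes { // bytes swept per span
        //		traceGCSweepSpan(n)
        //	}
        //	traceGCSweepDone()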
   974  
   975  func traceGCMarkAssistStart() {
   976  	traceEvent(traceEvGCMarkAssistStart, 1)
   977  }
   978  
   979  func traceGCMarkAssistDone() {
   980  	traceEvent(traceEvGCMarkAssistDone, -1)
   981  }
   982  
   983  func traceGoCreate(newg *g, pc uintptr) {
   984  	newg.traceseq = 0
   985  	newg.tracelastp = getg().m.p
   986  	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
   987  	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
   988  	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
   989  }
   990  
   991  func traceGoStart() {
   992  	_g_ := getg().m.curg
   993  	_p_ := _g_.m.p
   994  	_g_.traceseq++
   995  	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
   996  		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
   997  	} else if _g_.tracelastp == _p_ {
   998  		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
   999  	} else {
  1000  		_g_.tracelastp = _p_
  1001  		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
  1002  	}
  1003  }
  1004  
  1005  func traceGoEnd() {
  1006  	traceEvent(traceEvGoEnd, -1)
  1007  }
  1008  
  1009  func traceGoSched() {
  1010  	_g_ := getg()
  1011  	_g_.tracelastp = _g_.m.p
  1012  	traceEvent(traceEvGoSched, 1)
  1013  }
  1014  
  1015  func traceGoPreempt() {
  1016  	_g_ := getg()
  1017  	_g_.tracelastp = _g_.m.p
  1018  	traceEvent(traceEvGoPreempt, 1)
  1019  }
  1020  
  1021  func traceGoPark(traceEv byte, skip int) {
  1022  	if traceEv&traceFutileWakeup != 0 {
  1023  		traceEvent(traceEvFutileWakeup, -1)
  1024  	}
  1025  	traceEvent(traceEv & ^traceFutileWakeup, skip)
  1026  }
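
        // For example, a caller that knows the previous wakeup was futile can pass
        // traceGoPark(traceEvGoBlockSync|traceFutileWakeup, 2): the flag bit first
        // produces a traceEvFutileWakeup event and is then masked out, so the park
        // itself is still recorded as a plain traceEvGoBlockSync.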
  1027  
  1028  func traceGoUnpark(gp *g, skip int) {
  1029  	_p_ := getg().m.p
  1030  	gp.traceseq++
  1031  	if gp.tracelastp == _p_ {
  1032  		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
  1033  	} else {
  1034  		gp.tracelastp = _p_
  1035  		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
  1036  	}
  1037  }
  1038  
  1039  func traceGoSysCall() {
  1040  	traceEvent(traceEvGoSysCall, 1)
  1041  }
  1042  
  1043  func traceGoSysExit(ts int64) {
  1044  	if ts != 0 && ts < trace.ticksStart {
  1045  		// There is a race between the code that initializes sysexitticks
  1046  		// (in exitsyscall, which runs without a P, and therefore is not
  1047  		// stopped with the rest of the world) and the code that initializes
  1048  		// a new trace. The recorded sysexitticks must therefore be treated
  1049  		// as "best effort". If they are valid for this trace, then great,
  1050  		// use them for greater accuracy. But if they're not valid for this
  1051  		// trace, assume that the trace was started after the actual syscall
  1052  		// exit (but before we actually managed to start the goroutine,
  1053  		// aka right now), and assign a fresh timestamp to keep the log consistent.
  1054  		ts = 0
  1055  	}
  1056  	_g_ := getg().m.curg
  1057  	_g_.traceseq++
  1058  	_g_.tracelastp = _g_.m.p
  1059  	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
  1060  }
  1061  
  1062  func traceGoSysBlock(pp *p) {
  1063  	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
  1064  	// to handle this we temporarily employ the P.
  1065  	mp := acquirem()
  1066  	oldp := mp.p
  1067  	mp.p.set(pp)
  1068  	traceEvent(traceEvGoSysBlock, -1)
  1069  	mp.p = oldp
  1070  	releasem(mp)
  1071  }
  1072  
  1073  func traceHeapAlloc() {
  1074  	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
  1075  }
  1076  
  1077  func traceNextGC() {
  1078  	if memstats.next_gc == ^uint64(0) {
  1079  		// Heap-based triggering is disabled.
  1080  		traceEvent(traceEvNextGC, -1, 0)
  1081  	} else {
  1082  		traceEvent(traceEvNextGC, -1, memstats.next_gc)
  1083  	}
  1084  }