github.com/dannin/go@v0.0.0-20161031215817-d35dfd405eaa/src/runtime/trace.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Go execution tracer.
     6  // The tracer captures a wide range of execution events such as goroutine
     7  // creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
     8  // changes of heap size, and processor start/stop, and writes them to a buffer
     9  // in a compact form. A precise nanosecond-precision timestamp and a stack
    10  // trace are captured for most events.
    11  // See https://golang.org/s/go15trace for more info.
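        //
        // Most clients do not call StartTrace/ReadTrace/StopTrace directly; a
        // typical session goes through the runtime/trace package instead,
        // roughly (w being whatever io.Writer the caller provides):
        //
        //	trace.Start(w) // spawns a goroutine that copies ReadTrace output to w
        //	... run the workload ...
        //	trace.Stop()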
    12  
    13  package runtime
    14  
    15  import (
    16  	"runtime/internal/sys"
    17  	"unsafe"
    18  )
    19  
    20  // Event types in the trace; arguments are given in square brackets.
    21  const (
    22  	traceEvNone           = 0  // unused
    23  	traceEvBatch          = 1  // start of per-P batch of events [pid, timestamp]
    24  	traceEvFrequency      = 2  // contains tracer timer frequency [frequency (ticks per second)]
    25  	traceEvStack          = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
    26  	traceEvGomaxprocs     = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
    27  	traceEvProcStart      = 5  // start of P [timestamp, thread id]
    28  	traceEvProcStop       = 6  // stop of P [timestamp]
    29  	traceEvGCStart        = 7  // GC start [timestamp, seq, stack id]
    30  	traceEvGCDone         = 8  // GC done [timestamp]
    31  	traceEvGCScanStart    = 9  // GC mark termination start [timestamp]
    32  	traceEvGCScanDone     = 10 // GC mark termination done [timestamp]
    33  	traceEvGCSweepStart   = 11 // GC sweep start [timestamp, stack id]
    34  	traceEvGCSweepDone    = 12 // GC sweep done [timestamp]
    35  	traceEvGoCreate       = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
    36  	traceEvGoStart        = 14 // goroutine starts running [timestamp, goroutine id, seq]
    37  	traceEvGoEnd          = 15 // goroutine ends [timestamp]
    38  	traceEvGoStop         = 16 // goroutine stops (like in select{}) [timestamp, stack]
    39  	traceEvGoSched        = 17 // goroutine calls Gosched [timestamp, stack]
    40  	traceEvGoPreempt      = 18 // goroutine is preempted [timestamp, stack]
    41  	traceEvGoSleep        = 19 // goroutine calls Sleep [timestamp, stack]
    42  	traceEvGoBlock        = 20 // goroutine blocks [timestamp, stack]
    43  	traceEvGoUnblock      = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
    44  	traceEvGoBlockSend    = 22 // goroutine blocks on chan send [timestamp, stack]
    45  	traceEvGoBlockRecv    = 23 // goroutine blocks on chan recv [timestamp, stack]
    46  	traceEvGoBlockSelect  = 24 // goroutine blocks on select [timestamp, stack]
    47  	traceEvGoBlockSync    = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
    48  	traceEvGoBlockCond    = 26 // goroutine blocks on Cond [timestamp, stack]
    49  	traceEvGoBlockNet     = 27 // goroutine blocks on network [timestamp, stack]
    50  	traceEvGoSysCall      = 28 // syscall enter [timestamp, stack]
    51  	traceEvGoSysExit      = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
    52  	traceEvGoSysBlock     = 30 // syscall blocks [timestamp]
    53  	traceEvGoWaiting      = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
    54  	traceEvGoInSyscall    = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
    55  	traceEvHeapAlloc      = 33 // memstats.heap_live change [timestamp, heap_alloc]
    56  	traceEvNextGC         = 34 // memstats.next_gc change [timestamp, next_gc]
    57  	traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
    58  	traceEvFutileWakeup   = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
    59  	traceEvString         = 37 // string dictionary entry [ID, length, string]
    60  	traceEvGoStartLocal   = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
    61  	traceEvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
    62  	traceEvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
    63  	traceEvGoStartLabel   = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
    64  	traceEvGoBlockGC      = 42 // goroutine blocks on GC assist [timestamp, stack]
    65  	traceEvCount          = 43
    66  )
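
        // Encoding sketch (derived from traceEvent below): an event starts with one
        // byte ev|narg<<traceArgCountShift, with narg capped at 3; if narg == 3 a
        // length byte follows, then the LEB128-encoded tick delta and the arguments.
        // For example, traceEvGoEnd (15) with no arguments and no stack id is the
        // single byte 0x0f followed by the varint tick delta.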
    67  
    68  const (
    69  	// Timestamps in trace are cputicks/traceTickDiv.
    70  	// This makes the absolute values of timestamp diffs smaller,
    71  	// and so they are encoded in fewer bytes.
    72  	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
    73  	// The suggested increment frequency for PowerPC's time base register is
    74  	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
    75  	// and ppc64le.
    76  	// Tracing won't work reliably for architectures where cputicks is emulated
    77  	// by nanotime, so the value doesn't matter for those architectures.
    78  	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
    79  	// Maximum number of PCs in a single stack trace.
    80  	// Since events contain only a stack id rather than the whole stack trace,
    81  	// we can allow quite large values here.
    82  	traceStackSize = 128
    83  	// Identifier of a fake P that is used when we trace without a real P.
    84  	traceGlobProc = -1
    85  	// Maximum number of bytes needed to encode a uint64 in base-128.
    86  	traceBytesPerNumber = 10
    87  	// Shift of the number of arguments in the first event byte.
    88  	traceArgCountShift = 6
    89  	// Flag passed to traceGoPark to denote that the previous wakeup of this
    90  	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
    91  	// but another goroutine got ahead and acquired the mutex before the first
    92  	// goroutine was scheduled, so the first goroutine has to block again.
    93  	// Such wakeups happen on buffered channels and sync.Mutex,
    94  	// but are generally not interesting for the end user.
    95  	traceFutileWakeup byte = 128
    96  )
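
        // Note: the sys.Goarch* constants above are 1 for the target architecture
        // and 0 otherwise, so traceTickDiv evaluates to 64 on 386/amd64/amd64p32
        // and to 16 everywhere else. Similarly, traceBytesPerNumber is 10 because
        // base-128 encoding carries 7 bits per byte and ceil(64/7) = 10.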
    97  
    98  // trace is the global tracing context.
    99  var trace struct {
   100  	lock          mutex       // protects the following members
   101  	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
   102  	enabled       bool        // when set, the runtime traces events
   103  	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
   104  	headerWritten bool        // whether ReadTrace has emitted trace header
   105  	footerWritten bool        // whether ReadTrace has emitted trace footer
   106  	shutdownSema  uint32      // used to wait for ReadTrace completion
   107  	seqStart      uint64      // sequence number when tracing was started
   108  	ticksStart    int64       // cputicks when tracing was started
   109  	ticksEnd      int64       // cputicks when tracing was stopped
   110  	timeStart     int64       // nanotime when tracing was started
   111  	timeEnd       int64       // nanotime when tracing was stopped
   112  	seqGC         uint64      // GC start/done sequencer
   113  	reading       traceBufPtr // buffer currently handed off to user
   114  	empty         traceBufPtr // stack of empty buffers
   115  	fullHead      traceBufPtr // queue of full buffers
   116  	fullTail      traceBufPtr
   117  	reader        guintptr        // goroutine that called ReadTrace, or nil
   118  	stackTab      traceStackTable // maps stack traces to unique ids
   119  
   120  	// Dictionary for traceEvString.
   121  	//
   122  	// Currently this is used only at trace setup and for
   123  	// func/file:line info after the tracing session, so we assume
   124  	// single-threaded access.
   125  	strings   map[string]uint64
   126  	stringSeq uint64
   127  
   128  	// markWorkerLabels maps gcMarkWorkerMode to string ID.
   129  	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64
   130  
   131  	bufLock mutex       // protects buf
   132  	buf     traceBufPtr // global trace buffer, used when running without a p
   133  }
   134  
   135  // traceBufHeader is the header of a per-P tracing buffer.
   136  type traceBufHeader struct {
   137  	link      traceBufPtr             // in trace.empty/full
   138  	lastTicks uint64                  // when we wrote the last event
   139  	pos       int                     // next write offset in arr
   140  	stk       [traceStackSize]uintptr // scratch buffer for traceback
   141  }
   142  
   143  // traceBuf is a per-P tracing buffer.
   144  //
   145  //go:notinheap
   146  type traceBuf struct {
   147  	traceBufHeader
   148  	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for trace event data
   149  }
   150  
   151  // traceBufPtr is a *traceBuf that is not traced by the garbage
   152  // collector and doesn't have write barriers. traceBufs are not
   153  // allocated from the GC'd heap, so this is safe; they are also often
   154  // manipulated in contexts where write barriers are not allowed, so
   155  // this is necessary.
   156  //
   157  // TODO: Since traceBuf is now go:notinheap, this isn't necessary.
   158  type traceBufPtr uintptr
   159  
   160  func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
   161  func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
   162  func traceBufPtrOf(b *traceBuf) traceBufPtr {
   163  	return traceBufPtr(unsafe.Pointer(b))
   164  }
   165  
   166  // StartTrace enables tracing for the current process.
   167  // While tracing, the data will be buffered and available via ReadTrace.
   168  // StartTrace returns an error if tracing is already enabled.
   169  // Most clients should use the runtime/trace package or the testing package's
   170  // -test.trace flag instead of calling StartTrace directly.
   171  func StartTrace() error {
   172  	// Stop the world, so that we can take a consistent snapshot
   173  	// of all goroutines at the beginning of the trace.
   174  	stopTheWorld("start tracing")
   175  
   176  	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
   177  	// Exitsyscall could check trace.enabled long before and then suddenly wake up
   178  	// and decide to write to trace at a random point in time.
   179  	// However, such a syscall will use the global trace.buf buffer, because we've
   180  	// acquired all Ps by doing stop-the-world. So this protects us from such races.
   181  	lock(&trace.bufLock)
   182  
   183  	if trace.enabled || trace.shutdown {
   184  		unlock(&trace.bufLock)
   185  		startTheWorld()
   186  		return errorString("tracing is already enabled")
   187  	}
   188  
   189  	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
   190  	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
   191  	// That would lead to an inconsistent trace:
   192  	// - either GoSysExit appears before EvGoInSyscall,
   193  	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
   194  	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
   195  	// trace.enabled is set afterwards once we have emitted all preliminary events.
   196  	_g_ := getg()
   197  	_g_.m.startingtrace = true
   198  
   199  	// Obtain current stack ID to use in all traceEvGoCreate events below.
   200  	mp := acquirem()
   201  	stkBuf := make([]uintptr, traceStackSize)
   202  	stackID := traceStackID(mp, stkBuf, 2)
   203  	releasem(mp)
   204  
   205  	for _, gp := range allgs {
   206  		status := readgstatus(gp)
   207  		if status != _Gdead {
   208  			gp.traceseq = 0
   209  			gp.tracelastp = getg().m.p
   210  			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
   211  			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
   212  			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
   213  		}
   214  		if status == _Gwaiting {
   215  			// traceEvGoWaiting is implied to have seq=1.
   216  			gp.traceseq++
   217  			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
   218  		}
   219  		if status == _Gsyscall {
   220  			gp.traceseq++
   221  			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
   222  		} else {
   223  			gp.sysblocktraced = false
   224  		}
   225  	}
   226  	traceProcStart()
   227  	traceGoStart()
   228  	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
   229  	// If we do it the other way around, it is possible that exitsyscall will
   230  	// query sysexitticks after ticksStart but before the traceEvGoInSyscall timestamp.
   231  	// That would lead to a false conclusion that cputicks is broken.
   232  	trace.ticksStart = cputicks()
   233  	trace.timeStart = nanotime()
   234  	trace.headerWritten = false
   235  	trace.footerWritten = false
   236  	trace.strings = make(map[string]uint64)
   237  	trace.stringSeq = 0
   238  	trace.seqGC = 0
   239  	_g_.m.startingtrace = false
   240  	trace.enabled = true
   241  
   242  	// Register runtime goroutine labels.
   243  	_, pid, bufp := traceAcquireBuffer()
   244  	buf := (*bufp).ptr()
   245  	if buf == nil {
   246  		buf = traceFlush(0).ptr()
   247  		(*bufp).set(buf)
   248  	}
   249  	for i, label := range gcMarkWorkerModeStrings[:] {
   250  		trace.markWorkerLabels[i], buf = traceString(buf, label)
   251  	}
   252  	traceReleaseBuffer(pid)
   253  
   254  	unlock(&trace.bufLock)
   255  
   256  	startTheWorld()
   257  	return nil
   258  }
   259  
   260  // StopTrace stops tracing, if it was previously enabled.
   261  // StopTrace only returns after all the reads for the trace have completed.
   262  func StopTrace() {
   263  	// Stop the world so that we can collect the trace buffers from all p's below,
   264  	// and also to avoid races with traceEvent.
   265  	stopTheWorld("stop tracing")
   266  
   267  	// See the comment in StartTrace.
   268  	lock(&trace.bufLock)
   269  
   270  	if !trace.enabled {
   271  		unlock(&trace.bufLock)
   272  		startTheWorld()
   273  		return
   274  	}
   275  
   276  	traceGoSched()
   277  
   278  	for _, p := range &allp {
   279  		if p == nil {
   280  			break
   281  		}
   282  		buf := p.tracebuf
   283  		if buf != 0 {
   284  			traceFullQueue(buf)
   285  			p.tracebuf = 0
   286  		}
   287  	}
   288  	if trace.buf != 0 {
   289  		buf := trace.buf
   290  		trace.buf = 0
   291  		if buf.ptr().pos != 0 {
   292  			traceFullQueue(buf)
   293  		}
   294  	}
   295  
   296  	for {
   297  		trace.ticksEnd = cputicks()
   298  		trace.timeEnd = nanotime()
   299  		// Windows time ticks only every 15ms; wait for at least one tick.
   300  		if trace.timeEnd != trace.timeStart {
   301  			break
   302  		}
   303  		osyield()
   304  	}
   305  
   306  	trace.enabled = false
   307  	trace.shutdown = true
   308  	unlock(&trace.bufLock)
   309  
   310  	startTheWorld()
   311  
   312  	// The world is started but we've set trace.shutdown, so new tracing can't start.
   313  	// Wait for the trace reader to flush pending buffers and stop.
   314  	semacquire(&trace.shutdownSema, 0)
   315  	if raceenabled {
   316  		raceacquire(unsafe.Pointer(&trace.shutdownSema))
   317  	}
   318  
   319  	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
   320  	lock(&trace.lock)
   321  	for _, p := range &allp {
   322  		if p == nil {
   323  			break
   324  		}
   325  		if p.tracebuf != 0 {
   326  			throw("trace: non-empty trace buffer in proc")
   327  		}
   328  	}
   329  	if trace.buf != 0 {
   330  		throw("trace: non-empty global trace buffer")
   331  	}
   332  	if trace.fullHead != 0 || trace.fullTail != 0 {
   333  		throw("trace: non-empty full trace buffer")
   334  	}
   335  	if trace.reading != 0 || trace.reader != 0 {
   336  		throw("trace: reading after shutdown")
   337  	}
   338  	for trace.empty != 0 {
   339  		buf := trace.empty
   340  		trace.empty = buf.ptr().link
   341  		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
   342  	}
   343  	trace.strings = nil
   344  	trace.shutdown = false
   345  	unlock(&trace.lock)
   346  }
   347  
   348  // ReadTrace returns the next chunk of binary tracing data, blocking until data
   349  // is available. If tracing is turned off and all the data accumulated while it
   350  // was on has been returned, ReadTrace returns nil. The caller must copy the
   351  // returned data before calling ReadTrace again.
   352  // ReadTrace must be called from one goroutine at a time.
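        // A typical reader loop, roughly what the runtime/trace package runs in a
        // background goroutine (w being the caller's io.Writer):
        //
        //	for {
        //		data := runtime.ReadTrace()
        //		if data == nil {
        //			break
        //		}
        //		w.Write(data)
        //	}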
   353  func ReadTrace() []byte {
   354  	// This function may need to lock trace.lock recursively
   355  	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
   356  	// To allow this we use trace.lockOwner.
   357  	// Also this function must not allocate while holding trace.lock:
   358  	// allocation can call heap allocate, which will try to emit a trace
   359  	// event while holding heap lock.
   360  	lock(&trace.lock)
   361  	trace.lockOwner = getg()
   362  
   363  	if trace.reader != 0 {
   364  		// More than one goroutine reads trace. This is bad.
   365  		// But we would rather not crash the program because of tracing,
   366  		// since tracing can be enabled at runtime on production servers.
   367  		trace.lockOwner = nil
   368  		unlock(&trace.lock)
   369  		println("runtime: ReadTrace called from multiple goroutines simultaneously")
   370  		return nil
   371  	}
   372  	// Recycle the old buffer.
   373  	if buf := trace.reading; buf != 0 {
   374  		buf.ptr().link = trace.empty
   375  		trace.empty = buf
   376  		trace.reading = 0
   377  	}
   378  	// Write trace header.
   379  	if !trace.headerWritten {
   380  		trace.headerWritten = true
   381  		trace.lockOwner = nil
   382  		unlock(&trace.lock)
   383  		return []byte("go 1.8 trace\x00\x00\x00\x00")
   384  	}
   385  	// Wait for new data.
   386  	if trace.fullHead == 0 && !trace.shutdown {
   387  		trace.reader.set(getg())
   388  		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
   389  		lock(&trace.lock)
   390  	}
   391  	// Write a buffer.
   392  	if trace.fullHead != 0 {
   393  		buf := traceFullDequeue()
   394  		trace.reading = buf
   395  		trace.lockOwner = nil
   396  		unlock(&trace.lock)
   397  		return buf.ptr().arr[:buf.ptr().pos]
   398  	}
   399  	// Write footer with timer frequency.
   400  	if !trace.footerWritten {
   401  		trace.footerWritten = true
   402  		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
   403  		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
   404  		trace.lockOwner = nil
   405  		unlock(&trace.lock)
   406  		var data []byte
   407  		data = append(data, traceEvFrequency|0<<traceArgCountShift)
   408  		data = traceAppend(data, uint64(freq))
   409  		if timers.gp != nil {
   410  			data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
   411  			data = traceAppend(data, uint64(timers.gp.goid))
   412  		}
   413  		// This will emit a bunch of full buffers; we will pick them up
   414  		// on the next iteration.
   415  		trace.stackTab.dump()
   416  		return data
   417  	}
   418  	// Done.
   419  	if trace.shutdown {
   420  		trace.lockOwner = nil
   421  		unlock(&trace.lock)
   422  		if raceenabled {
   423  			// Model synchronization on trace.shutdownSema, which the race
   424  			// detector does not see. This is required to avoid false
   425  			// race reports on the writer passed to trace.Start.
   426  			racerelease(unsafe.Pointer(&trace.shutdownSema))
   427  		}
   428  		// trace.enabled is already reset, so we can call traceable functions.
   429  		semrelease(&trace.shutdownSema)
   430  		return nil
   431  	}
   432  	// Also bad, but see the comment above.
   433  	trace.lockOwner = nil
   434  	unlock(&trace.lock)
   435  	println("runtime: spurious wakeup of trace reader")
   436  	return nil
   437  }
   438  
   439  // traceReader returns the trace reader that should be woken up, if any.
   440  func traceReader() *g {
   441  	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
   442  		return nil
   443  	}
   444  	lock(&trace.lock)
   445  	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
   446  		unlock(&trace.lock)
   447  		return nil
   448  	}
   449  	gp := trace.reader.ptr()
   450  	trace.reader.set(nil)
   451  	unlock(&trace.lock)
   452  	return gp
   453  }
   454  
   455  // traceProcFree frees trace buffer associated with pp.
   456  func traceProcFree(pp *p) {
   457  	buf := pp.tracebuf
   458  	pp.tracebuf = 0
   459  	if buf == 0 {
   460  		return
   461  	}
   462  	lock(&trace.lock)
   463  	traceFullQueue(buf)
   464  	unlock(&trace.lock)
   465  }
   466  
   467  // traceFullQueue queues buf into queue of full buffers.
   468  func traceFullQueue(buf traceBufPtr) {
   469  	buf.ptr().link = 0
   470  	if trace.fullHead == 0 {
   471  		trace.fullHead = buf
   472  	} else {
   473  		trace.fullTail.ptr().link = buf
   474  	}
   475  	trace.fullTail = buf
   476  }
   477  
   478  // traceFullDequeue dequeues from queue of full buffers.
   479  func traceFullDequeue() traceBufPtr {
   480  	buf := trace.fullHead
   481  	if buf == 0 {
   482  		return 0
   483  	}
   484  	trace.fullHead = buf.ptr().link
   485  	if trace.fullHead == 0 {
   486  		trace.fullTail = 0
   487  	}
   488  	buf.ptr().link = 0
   489  	return buf
   490  }
   491  
   492  // traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
   493  // ev is the event type.
   494  // If skip > 0, write the current stack id as the last argument (skipping skip top frames).
   495  // If skip == 0, this event type should contain a stack, but we don't want
   496  // to collect and remember it for this particular call.
   497  func traceEvent(ev byte, skip int, args ...uint64) {
   498  	mp, pid, bufp := traceAcquireBuffer()
   499  	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
   500  	// This protects from races between traceEvent and StartTrace/StopTrace.
   501  
   502  	// The caller checked that trace.enabled == true, but trace.enabled might have been
   503  	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
   504  	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
   505  	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
   506  	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
   507  	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
   508  	if !trace.enabled && !mp.startingtrace {
   509  		traceReleaseBuffer(pid)
   510  		return
   511  	}
   512  	buf := (*bufp).ptr()
   513  	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two additional params
   514  	if buf == nil || len(buf.arr)-buf.pos < maxSize {
   515  		buf = traceFlush(traceBufPtrOf(buf)).ptr()
   516  		(*bufp).set(buf)
   517  	}
   518  
   519  	ticks := uint64(cputicks()) / traceTickDiv
   520  	tickDiff := ticks - buf.lastTicks
   521  	if buf.pos == 0 {
   522  		buf.byte(traceEvBatch | 1<<traceArgCountShift)
   523  		buf.varint(uint64(pid))
   524  		buf.varint(ticks)
   525  		tickDiff = 0
   526  	}
   527  	buf.lastTicks = ticks
   528  	narg := byte(len(args))
   529  	if skip >= 0 {
   530  		narg++
   531  	}
   532  	// We have only 2 bits for the number of arguments.
   533  	// If the number is >= 3, then the event type is followed by the event length in bytes.
   534  	if narg > 3 {
   535  		narg = 3
   536  	}
   537  	startPos := buf.pos
   538  	buf.byte(ev | narg<<traceArgCountShift)
   539  	var lenp *byte
   540  	if narg == 3 {
   541  		// Reserve the byte for length assuming that length < 128.
   542  		buf.varint(0)
   543  		lenp = &buf.arr[buf.pos-1]
   544  	}
   545  	buf.varint(tickDiff)
   546  	for _, a := range args {
   547  		buf.varint(a)
   548  	}
   549  	if skip == 0 {
   550  		buf.varint(0)
   551  	} else if skip > 0 {
   552  		buf.varint(traceStackID(mp, buf.stk[:], skip))
   553  	}
   554  	evSize := buf.pos - startPos
   555  	if evSize > maxSize {
   556  		throw("invalid length of trace event")
   557  	}
   558  	if lenp != nil {
   559  		// Fill in actual length.
   560  		*lenp = byte(evSize - 2)
   561  	}
   562  	traceReleaseBuffer(pid)
   563  }
   564  
   565  func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
   566  	_g_ := getg()
   567  	gp := mp.curg
   568  	var nstk int
   569  	if gp == _g_ {
   570  		nstk = callers(skip+1, buf[:])
   571  	} else if gp != nil {
   573  		// This may happen when tracing a system call,
   574  		// so we must lock the stack.
   575  		if gcTryLockStackBarriers(gp) {
   576  			nstk = gcallers(gp, skip, buf[:])
   577  			gcUnlockStackBarriers(gp)
   578  		}
   579  	}
   580  	if nstk > 0 {
   581  		nstk-- // skip runtime.goexit
   582  	}
   583  	if nstk > 0 && gp.goid == 1 {
   584  		nstk-- // skip runtime.main
   585  	}
   586  	id := trace.stackTab.put(buf[:nstk])
   587  	return uint64(id)
   588  }
   589  
   590  // traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
   591  func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
   592  	mp = acquirem()
   593  	if p := mp.p.ptr(); p != nil {
   594  		return mp, p.id, &p.tracebuf
   595  	}
   596  	lock(&trace.bufLock)
   597  	return mp, traceGlobProc, &trace.buf
   598  }
   599  
   600  // traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
   601  func traceReleaseBuffer(pid int32) {
   602  	if pid == traceGlobProc {
   603  		unlock(&trace.bufLock)
   604  	}
   605  	releasem(getg().m)
   606  }
   607  
   608  // traceFlush puts buf onto the queue of full buffers and returns an empty buffer.
   609  func traceFlush(buf traceBufPtr) traceBufPtr {
   610  	owner := trace.lockOwner
   611  	dolock := owner == nil || owner != getg().m.curg
   612  	if dolock {
   613  		lock(&trace.lock)
   614  	}
   615  	if buf != 0 {
   616  		traceFullQueue(buf)
   617  	}
   618  	if trace.empty != 0 {
   619  		buf = trace.empty
   620  		trace.empty = buf.ptr().link
   621  	} else {
   622  		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
   623  		if buf == 0 {
   624  			throw("trace: out of memory")
   625  		}
   626  	}
   627  	bufp := buf.ptr()
   628  	bufp.link.set(nil)
   629  	bufp.pos = 0
   630  	bufp.lastTicks = 0
   631  	if dolock {
   632  		unlock(&trace.lock)
   633  	}
   634  	return buf
   635  }
   636  
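        // traceString interns s in the trace.strings dictionary, emitting a
        // traceEvString event the first time a string is seen. It returns the
        // string's id and the current (possibly flushed and replaced) buffer.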
   637  func traceString(buf *traceBuf, s string) (uint64, *traceBuf) {
   638  	if s == "" {
   639  		return 0, buf
   640  	}
   641  	if id, ok := trace.strings[s]; ok {
   642  		return id, buf
   643  	}
   644  
   645  	trace.stringSeq++
   646  	id := trace.stringSeq
   647  	trace.strings[s] = id
   648  
   649  	size := 1 + 2*traceBytesPerNumber + len(s)
   650  	if len(buf.arr)-buf.pos < size {
   651  		buf = traceFlush(traceBufPtrOf(buf)).ptr()
   652  	}
   653  	buf.byte(traceEvString)
   654  	buf.varint(id)
   655  	buf.varint(uint64(len(s)))
   656  	buf.pos += copy(buf.arr[buf.pos:], s)
   657  	return id, buf
   658  }
   659  
   660  // traceAppend appends v to buf in little-endian-base-128 encoding.
   661  func traceAppend(buf []byte, v uint64) []byte {
   662  	for ; v >= 0x80; v >>= 7 {
   663  		buf = append(buf, 0x80|byte(v))
   664  	}
   665  	buf = append(buf, byte(v))
   666  	return buf
   667  }
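
        // For example, traceAppend(nil, 300) produces the bytes 0xac 0x02: the low
        // seven bits (0x2c) go out first with the continuation bit set, then the
        // remaining bits (0x02).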
   668  
   669  // varint appends v to buf in little-endian-base-128 encoding.
   670  func (buf *traceBuf) varint(v uint64) {
   671  	pos := buf.pos
   672  	for ; v >= 0x80; v >>= 7 {
   673  		buf.arr[pos] = 0x80 | byte(v)
   674  		pos++
   675  	}
   676  	buf.arr[pos] = byte(v)
   677  	pos++
   678  	buf.pos = pos
   679  }
   680  
   681  // byte appends v to buf.
   682  func (buf *traceBuf) byte(v byte) {
   683  	buf.arr[buf.pos] = v
   684  	buf.pos++
   685  }
   686  
   687  // traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
   688  // It is lock-free for reading.
   689  type traceStackTable struct {
   690  	lock mutex
   691  	seq  uint32
   692  	mem  traceAlloc
   693  	tab  [1 << 13]traceStackPtr
   694  }
   695  
   696  // traceStack is a single stack in traceStackTable.
   697  type traceStack struct {
   698  	link traceStackPtr
   699  	hash uintptr
   700  	id   uint32
   701  	n    int
   702  	stk  [0]uintptr // real type [n]uintptr
   703  }
   704  
   705  type traceStackPtr uintptr
   706  
   707  func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
   708  
   709  // stack returns slice of PCs.
   710  func (ts *traceStack) stack() []uintptr {
   711  	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
   712  }
   713  
   714  // put returns a unique id for the stack trace pcs and caches it in the table
   715  // if it sees the trace for the first time.
   716  func (tab *traceStackTable) put(pcs []uintptr) uint32 {
   717  	if len(pcs) == 0 {
   718  		return 0
   719  	}
   720  	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
   721  	// First, search the hashtable w/o the mutex.
   722  	if id := tab.find(pcs, hash); id != 0 {
   723  		return id
   724  	}
   725  	// Now, double check under the mutex.
   726  	lock(&tab.lock)
   727  	if id := tab.find(pcs, hash); id != 0 {
   728  		unlock(&tab.lock)
   729  		return id
   730  	}
   731  	// Create new record.
   732  	tab.seq++
   733  	stk := tab.newStack(len(pcs))
   734  	stk.hash = hash
   735  	stk.id = tab.seq
   736  	stk.n = len(pcs)
   737  	stkpc := stk.stack()
   738  	for i, pc := range pcs {
   739  		stkpc[i] = pc
   740  	}
   741  	part := int(hash % uintptr(len(tab.tab)))
   742  	stk.link = tab.tab[part]
   743  	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
   744  	unlock(&tab.lock)
   745  	return stk.id
   746  }
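
        // put publishes the new stack with atomicstorep only after the record is
        // fully initialized; this is what makes the mutex-free fast path in find
        // safe, since a concurrent reader sees either the old or the new list head.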
   747  
   748  // find checks if the stack trace pcs is already present in the table.
   749  func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
   750  	part := int(hash % uintptr(len(tab.tab)))
   751  Search:
   752  	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
   753  		if stk.hash == hash && stk.n == len(pcs) {
   754  			for i, stkpc := range stk.stack() {
   755  				if stkpc != pcs[i] {
   756  					continue Search
   757  				}
   758  			}
   759  			return stk.id
   760  		}
   761  	}
   762  	return 0
   763  }
   764  
   765  // newStack allocates a new stack of size n.
   766  func (tab *traceStackTable) newStack(n int) *traceStack {
   767  	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
   768  }
   769  
   770  // dump writes all previously cached stacks to trace buffers,
   771  // releases all memory and resets state.
   772  func (tab *traceStackTable) dump() {
   773  	frames := make(map[uintptr]traceFrame)
   774  	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
   775  	buf := traceFlush(0).ptr()
   776  	for _, stk := range tab.tab {
   777  		stk := stk.ptr()
   778  		for ; stk != nil; stk = stk.link.ptr() {
   779  			tmpbuf := tmp[:0]
   780  			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
   781  			tmpbuf = traceAppend(tmpbuf, uint64(stk.n))
   782  			for _, pc := range stk.stack() {
   783  				var frame traceFrame
   784  				frame, buf = traceFrameForPC(buf, frames, pc)
   785  				tmpbuf = traceAppend(tmpbuf, uint64(pc))
   786  				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
   787  				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
   788  				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
   789  			}
   790  			// Now copy to the buffer.
   791  			size := 1 + traceBytesPerNumber + len(tmpbuf)
   792  			if len(buf.arr)-buf.pos < size {
   793  				buf = traceFlush(traceBufPtrOf(buf)).ptr()
   794  			}
   795  			buf.byte(traceEvStack | 3<<traceArgCountShift)
   796  			buf.varint(uint64(len(tmpbuf)))
   797  			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
   798  		}
   799  	}
   800  
   801  	lock(&trace.lock)
   802  	traceFullQueue(traceBufPtrOf(buf))
   803  	unlock(&trace.lock)
   804  
   805  	tab.mem.drop()
   806  	*tab = traceStackTable{}
   807  }
   808  
   809  type traceFrame struct {
   810  	funcID uint64
   811  	fileID uint64
   812  	line   uint64
   813  }
   814  
   815  func traceFrameForPC(buf *traceBuf, frames map[uintptr]traceFrame, pc uintptr) (traceFrame, *traceBuf) {
   816  	if frame, ok := frames[pc]; ok {
   817  		return frame, buf
   818  	}
   819  
   820  	var frame traceFrame
   821  	f := findfunc(pc)
   822  	if f == nil {
   823  		frames[pc] = frame
   824  		return frame, buf
   825  	}
   826  
   827  	fn := funcname(f)
   828  	const maxLen = 1 << 10
   829  	if len(fn) > maxLen {
   830  		fn = fn[len(fn)-maxLen:]
   831  	}
   832  	frame.funcID, buf = traceString(buf, fn)
   833  	file, line := funcline(f, pc-sys.PCQuantum)
   834  	frame.line = uint64(line)
   835  	if len(file) > maxLen {
   836  		file = file[len(file)-maxLen:]
   837  	}
   838  	frame.fileID, buf = traceString(buf, file)
   839  	return frame, buf
   840  }
   841  
   842  // traceAlloc is a non-thread-safe region allocator.
   843  // It holds a linked list of traceAllocBlock.
   844  type traceAlloc struct {
   845  	head traceAllocBlockPtr
   846  	off  uintptr
   847  }
   848  
   849  // traceAllocBlock is a block in traceAlloc.
   850  //
   851  // traceAllocBlock is allocated from non-GC'd memory, so it must not
   852  // contain heap pointers. Writes to pointers to traceAllocBlocks do
   853  // not need write barriers.
   854  //
   855  //go:notinheap
   856  type traceAllocBlock struct {
   857  	next traceAllocBlockPtr
   858  	data [64<<10 - sys.PtrSize]byte
   859  }
   860  
   861  // TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
   862  type traceAllocBlockPtr uintptr
   863  
   864  func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
   865  func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
   866  
   867  // alloc allocates n-byte block.
   868  func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
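        	// Bump allocation: round n up to a pointer-size multiple and carve it
        	// out of the current block, grabbing a fresh 64KB block from sysAlloc
        	// when the current one cannot fit the request.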
   869  	n = round(n, sys.PtrSize)
   870  	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
   871  		if n > uintptr(len(a.head.ptr().data)) {
   872  			throw("trace: alloc too large")
   873  		}
   874  		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
   875  		if block == nil {
   876  			throw("trace: out of memory")
   877  		}
   878  		block.next.set(a.head.ptr())
   879  		a.head.set(block)
   880  		a.off = 0
   881  	}
   882  	p := &a.head.ptr().data[a.off]
   883  	a.off += n
   884  	return unsafe.Pointer(p)
   885  }
   886  
   887  // drop frees all previously allocated memory and resets the allocator.
   888  func (a *traceAlloc) drop() {
   889  	for a.head != 0 {
   890  		block := a.head.ptr()
   891  		a.head.set(block.next.ptr())
   892  		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
   893  	}
   894  }
   895  
   896  // The following functions write specific events to the trace.
   897  
   898  func traceGomaxprocs(procs int32) {
   899  	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
   900  }
   901  
   902  func traceProcStart() {
   903  	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
   904  }
   905  
   906  func traceProcStop(pp *p) {
   907  	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
   908  	// to handle this we temporarily employ the P.
   909  	mp := acquirem()
   910  	oldp := mp.p
   911  	mp.p.set(pp)
   912  	traceEvent(traceEvProcStop, -1)
   913  	mp.p = oldp
   914  	releasem(mp)
   915  }
   916  
   917  func traceGCStart() {
   918  	traceEvent(traceEvGCStart, 3, trace.seqGC)
   919  	trace.seqGC++
   920  }
   921  
   922  func traceGCDone() {
   923  	traceEvent(traceEvGCDone, -1)
   924  }
   925  
   926  func traceGCScanStart() {
   927  	traceEvent(traceEvGCScanStart, -1)
   928  }
   929  
   930  func traceGCScanDone() {
   931  	traceEvent(traceEvGCScanDone, -1)
   932  }
   933  
   934  func traceGCSweepStart() {
   935  	traceEvent(traceEvGCSweepStart, 1)
   936  }
   937  
   938  func traceGCSweepDone() {
   939  	traceEvent(traceEvGCSweepDone, -1)
   940  }
   941  
   942  func traceGoCreate(newg *g, pc uintptr) {
   943  	newg.traceseq = 0
   944  	newg.tracelastp = getg().m.p
   945  	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
   946  	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
   947  	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
   948  }
   949  
   950  func traceGoStart() {
   951  	_g_ := getg().m.curg
   952  	_p_ := _g_.m.p
   953  	_g_.traceseq++
   954  	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
   955  		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
   956  	} else if _g_.tracelastp == _p_ {
   957  		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
   958  	} else {
   959  		_g_.tracelastp = _p_
   960  		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
   961  	}
   962  }
   963  
   964  func traceGoEnd() {
   965  	traceEvent(traceEvGoEnd, -1)
   966  }
   967  
   968  func traceGoSched() {
   969  	_g_ := getg()
   970  	_g_.tracelastp = _g_.m.p
   971  	traceEvent(traceEvGoSched, 1)
   972  }
   973  
   974  func traceGoPreempt() {
   975  	_g_ := getg()
   976  	_g_.tracelastp = _g_.m.p
   977  	traceEvent(traceEvGoPreempt, 1)
   978  }
   979  
   980  func traceGoPark(traceEv byte, skip int, gp *g) {
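        	// Callers may OR traceFutileWakeup into traceEv (see its definition
        	// above); emit the FutileWakeup event separately and strip the flag
        	// from the real event.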
   981  	if traceEv&traceFutileWakeup != 0 {
   982  		traceEvent(traceEvFutileWakeup, -1)
   983  	}
   984  	traceEvent(traceEv & ^traceFutileWakeup, skip)
   985  }
   986  
   987  func traceGoUnpark(gp *g, skip int) {
   988  	_p_ := getg().m.p
   989  	gp.traceseq++
   990  	if gp.tracelastp == _p_ {
   991  		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
   992  	} else {
   993  		gp.tracelastp = _p_
   994  		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
   995  	}
   996  }
   997  
   998  func traceGoSysCall() {
   999  	traceEvent(traceEvGoSysCall, 1)
  1000  }
  1001  
  1002  func traceGoSysExit(ts int64) {
  1003  	if ts != 0 && ts < trace.ticksStart {
  1004  		// There is a race between the code that initializes sysexitticks
  1005  		// (in exitsyscall, which runs without a P, and therefore is not
  1006  		// stopped with the rest of the world) and the code that initializes
  1007  		// a new trace. The recorded sysexitticks must therefore be treated
  1008  		// as "best effort". If they are valid for this trace, then great,
  1009  		// use them for greater accuracy. But if they're not valid for this
  1010  		// trace, assume that the trace was started after the actual syscall
  1011  		// exit (but before we actually managed to start the goroutine,
  1012  		// aka right now), and assign a fresh timestamp to keep the log consistent.
  1013  		ts = 0
  1014  	}
  1015  	_g_ := getg().m.curg
  1016  	_g_.traceseq++
  1017  	_g_.tracelastp = _g_.m.p
  1018  	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
  1019  }
  1020  
  1021  func traceGoSysBlock(pp *p) {
  1022  	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
  1023  	// to handle this we temporarily employ the P.
  1024  	mp := acquirem()
  1025  	oldp := mp.p
  1026  	mp.p.set(pp)
  1027  	traceEvent(traceEvGoSysBlock, -1)
  1028  	mp.p = oldp
  1029  	releasem(mp)
  1030  }
  1031  
  1032  func traceHeapAlloc() {
  1033  	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
  1034  }
  1035  
  1036  func traceNextGC() {
  1037  	if memstats.next_gc == ^uint64(0) {
  1038  		// Heap-based triggering is disabled.
  1039  		traceEvent(traceEvNextGC, -1, 0)
  1040  	} else {
  1041  		traceEvent(traceEvNextGC, -1, memstats.next_gc)
  1042  	}
  1043  }