github.com/fjballest/golang@v0.0.0-20151209143359-e4c5fe594ca8/src/runtime/trace.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace; arguments are given in square brackets.
const (
	traceEvNone           = 0  // unused
	traceEvBatch          = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency      = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack          = 3  // stack [stack id, number of PCs, array of PCs]
	traceEvGomaxprocs     = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart      = 5  // start of P [timestamp, thread id]
	traceEvProcStop       = 6  // stop of P [timestamp]
	traceEvGCStart        = 7  // GC start [timestamp, stack id]
	traceEvGCDone         = 8  // GC done [timestamp]
	traceEvGCScanStart    = 9  // GC scan start [timestamp]
	traceEvGCScanDone     = 10 // GC scan done [timestamp]
	traceEvGCSweepStart   = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone    = 12 // GC sweep done [timestamp]
	traceEvGoCreate       = 13 // goroutine creation [timestamp, new goroutine id, start PC, stack id]
	traceEvGoStart        = 14 // goroutine starts running [timestamp, goroutine id]
	traceEvGoEnd          = 15 // goroutine ends [timestamp]
	traceEvGoStop         = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched        = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt      = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep        = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock        = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock      = 21 // goroutine is unblocked [timestamp, goroutine id, stack]
	traceEvGoBlockSend    = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv    = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect  = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync    = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond    = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet     = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall      = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit      = 29 // syscall exit [timestamp, goroutine id, real timestamp]
	traceEvGoSysBlock     = 30 // syscall blocks [timestamp]
	traceEvGoWaiting      = 31 // denotes that goroutine is blocked when tracing starts [goroutine id]
	traceEvGoInSyscall    = 32 // denotes that goroutine is in syscall when tracing starts [goroutine id]
	traceEvHeapAlloc      = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC         = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup   = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvCount          = 37
)
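
// As a concrete example of the wire format (the encoding logic lives in
// traceEvent below): traceGoStart emits traceEvGoStart (14) with one
// explicit argument, the goroutine id, and no stack, so its first byte is
// 14 | 1<<traceArgCountShift = 0x4e, followed by the varint-encoded
// sequence diff, timestamp diff, and goroutine id.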

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
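	// (The sys.Goarch* constants below are 1 on the matching architecture
	// and 0 everywhere else, so the expression evaluates to 64 on 386,
	// amd64, and amd64p32, and to 16 on all other architectures.)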
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine was scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)

// trace is the global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        *g              // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

var traceseq uint64 // global trace sequence number

// tracestamp returns a consistent (sequence number, timestamp) pair
// for use in a trace. We need to make sure that timestamp ordering
// (assuming synchronized CPUs) and sequence ordering match.
// To do that, we increment traceseq, grab ticks, and increment traceseq again.
// We treat odd traceseq as a sign that another thread is in the middle
// of the sequence and spin until it is done.
   127  // that used to call xadd64 and cputicks are sensitive to that.
   128  //go:nosplit
   129  func tracestamp() (seq uint64, ts int64) {
   130  	seq = atomic.Load64(&traceseq)
   131  	for seq&1 != 0 || !atomic.Cas64(&traceseq, seq, seq+1) {
   132  		seq = atomic.Load64(&traceseq)
   133  	}
   134  	ts = cputicks()
   135  	atomic.Store64(&traceseq, seq+2)
   136  	return seq >> 1, ts
   137  }
   138  
   139  // traceBufHeader is per-P tracing buffer.
   140  type traceBufHeader struct {
   141  	link      traceBufPtr             // in trace.empty/full
   142  	lastSeq   uint64                  // sequence number of last event
   143  	lastTicks uint64                  // when we wrote the last event
   144  	pos       int                     // next write offset in arr
   145  	stk       [traceStackSize]uintptr // scratch buffer for traceback
   146  }

// traceBuf is a per-P tracing buffer.
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // trace event data; sized so that traceBuf is 64 kB
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
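// A typical client (roughly what the runtime/trace package does) pumps the
// trace to an io.Writer from a dedicated goroutine; here w stands for any
// io.Writer of the caller's choosing:
//
//	if err := runtime.StartTrace(); err != nil {
//		return err
//	}
//	go func() {
//		for {
//			data := runtime.ReadTrace()
//			if data == nil {
//				break // tracing stopped and all data consumed
//			}
//			w.Write(data)
//		}
//	}()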
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	trace.seqStart, trace.ticksStart = tracestamp()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true
	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			traceGoCreate(gp, gp.startpc)
		}
		if status == _Gwaiting {
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	_g_.m.startingtrace = false
	trace.enabled = true

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	for _, p := range &allp {
		if p == nil {
			break
		}
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 && trace.buf.ptr().pos != 0 {
		buf := trace.buf
		trace.buf = 0
		traceFullQueue(buf)
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms; wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	trace.stackTab.dump()

	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema, false)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range &allp {
		if p == nil {
			break
		}
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != nil {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
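// The stream starts with the 16-byte header "go 1.5 trace\x00\x00\x00\x00",
// followed by the queued per-P batches, and ends with a footer carrying the
// timer frequency (traceEvFrequency) and, if a timer goroutine exists, a
// traceEvTimerGoroutine event.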
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call into the heap allocator, which will try to emit a
	// trace event while holding the heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != nil {
		// More than one goroutine reads trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// since tracing can be enabled at runtime on production servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.5 trace\x00\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader = getg()
		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		data = traceAppend(data, 0)
		if timers.gp != nil {
			data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
			data = traceAppend(data, uint64(timers.gp.goid))
			data = traceAppend(data, 0)
		}
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which the race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == nil || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == nil || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader
	trace.reader = nil
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees the trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into the queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from the queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to the trace buffer, flushing the buffer if necessary.
// ev is the event type.
// If skip > 0, write the current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}
	buf := (*bufp).ptr()
	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf)).ptr()
		(*bufp).set(buf)
	}

	seq, ticksraw := tracestamp()
	seqDiff := seq - buf.lastSeq
	ticks := uint64(ticksraw) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
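	// The first event in each buffer is a batch header that carries the
	// absolute pid, sequence number, and tick count; subsequent events
	// encode only diffs against buf.lastSeq and buf.lastTicks.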
	if buf.pos == 0 {
		buf.byte(traceEvBatch | 1<<traceArgCountShift)
		buf.varint(uint64(pid))
		buf.varint(seq)
		buf.varint(ticks)
		seqDiff = 0
		tickDiff = 0
	}
	buf.lastSeq = seq
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for the number of arguments.
	// If the number is >= 3, then the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
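	// For instance, GoSysExit carries three explicit arguments and no stack,
	// so its narg is 3 and the length byte below is emitted for it.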
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(seqDiff)
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		_g_ := getg()
		gp := mp.curg
		var nstk int
		if gp == _g_ {
			nstk = callers(skip, buf.stk[:])
		} else if gp != nil {
			nstk = gcallers(gp, skip, buf.stk[:])
		}
		if nstk > 0 {
			nstk-- // skip runtime.goexit
		}
		if nstk > 0 && gp.goid == 1 {
			nstk-- // skip runtime.main
		}
		id := trace.stackTab.put(buf.stk[:nstk])
		buf.varint(uint64(id))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
	traceReleaseBuffer(pid)
}

// traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto the queue of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0
	bufp.lastTicks = 0
	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
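// For example, 300 (0b1_0010_1100) encodes as 0xac 0x02: each byte holds
// the next seven low bits of v, and the high bit marks continuation.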
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
// It is lock-free for reading.
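// Writers fully initialize a new traceStack before publishing it into a
// bucket with atomicstorep, so a concurrent lock-free find either misses
// the new entry or sees it fully constructed.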
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns the slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the stack trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable without the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double-check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create a new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + traceStackSize) * traceBytesPerNumber]byte
	buf := traceFlush(0).ptr()
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			maxSize := 1 + (3+stk.n)*traceBytesPerNumber
			if len(buf.arr)-buf.pos < maxSize {
				buf = traceFlush(traceBufPtrOf(buf)).ptr()
			}
			// Form the event in the temp buffer; we need to know the actual length.
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			tmpbuf = traceAppend(tmpbuf, uint64(stk.n))
			for _, pc := range stk.stack() {
				tmpbuf = traceAppend(tmpbuf, uint64(pc))
			}
			// Now copy to the buffer.
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(traceBufPtrOf(buf))
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
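// In this file it backs traceStackTable.mem: alloc is only called with the
// stack table's mutex held, and drop only runs from StopTrace while the
// world is stopped, so the lack of internal locking is safe here.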
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to the trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3)
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCScanStart() {
	traceEvent(traceEvGCScanStart, -1)
}

func traceGCScanDone() {
	traceEvent(traceEvGCScanDone, -1)
}

func traceGCSweepStart() {
	traceEvent(traceEvGCSweepStart, 1)
}

func traceGCSweepDone() {
	traceEvent(traceEvGCSweepDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(pc))
}

func traceGoStart() {
	traceEvent(traceEvGoStart, -1, uint64(getg().m.curg.goid))
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int, gp *g) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	traceEvent(traceEvGoUnblock, skip, uint64(gp.goid))
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(seq uint64, ts int64) {
	if int64(seq)-int64(trace.seqStart) < 0 {
		// The sequence number was obtained during a previous tracing session; ignore the event.
		return
	}
	traceEvent(traceEvGoSysExit, -1, uint64(getg().m.curg.goid), seq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	traceEvent(traceEvNextGC, -1, memstats.next_gc)
}