github.com/d4l3k/go@v0.0.0-20151015000803-65fc379daeda/src/runtime/trace.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events such as goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, and processor start/stop, and writes them to a buffer
// in a compact form. A nanosecond-precision timestamp and a stack trace
// are captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import "unsafe"
// Event types in the trace, args are given in square brackets.
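// The first byte of each event holds the event type in its low six bits and
// the argument count in its top two bits (see traceArgCountShift and the
// encoding logic in traceEvent below).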
const (
	traceEvNone           = 0  // unused
	traceEvBatch          = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency      = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack          = 3  // stack [stack id, number of PCs, array of PCs]
	traceEvGomaxprocs     = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart      = 5  // start of P [timestamp, thread id]
	traceEvProcStop       = 6  // stop of P [timestamp]
	traceEvGCStart        = 7  // GC start [timestamp, stack id]
	traceEvGCDone         = 8  // GC done [timestamp]
	traceEvGCScanStart    = 9  // GC scan start [timestamp]
	traceEvGCScanDone     = 10 // GC scan done [timestamp]
	traceEvGCSweepStart   = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone    = 12 // GC sweep done [timestamp]
	traceEvGoCreate       = 13 // goroutine creation [timestamp, new goroutine id, start PC, stack id]
	traceEvGoStart        = 14 // goroutine starts running [timestamp, goroutine id]
	traceEvGoEnd          = 15 // goroutine ends [timestamp]
	traceEvGoStop         = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched        = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt      = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep        = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock        = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock      = 21 // goroutine is unblocked [timestamp, goroutine id, stack]
	traceEvGoBlockSend    = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv    = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect  = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync    = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond    = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet     = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall      = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit      = 29 // syscall exit [timestamp, goroutine id, real timestamp]
	traceEvGoSysBlock     = 30 // syscall blocks [timestamp]
	traceEvGoWaiting      = 31 // denotes that goroutine is blocked when tracing starts [goroutine id]
	traceEvGoInSyscall    = 32 // denotes that goroutine is in syscall when tracing starts [goroutine id]
	traceEvHeapAlloc      = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC         = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup   = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvCount          = 37
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(goarch_386|goarch_amd64|goarch_amd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine was scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)

// trace is the global tracing context.
var trace struct {
	lock          mutex     // protects the following members
	lockOwner     *g        // to avoid deadlocks during recursive acquisition of trace.lock
	enabled       bool      // when set runtime traces events
	shutdown      bool      // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool      // whether ReadTrace has emitted trace header
	footerWritten bool      // whether ReadTrace has emitted trace footer
	shutdownSema  uint32    // used to wait for ReadTrace completion
	seqStart      uint64    // sequence number when tracing was started
	ticksStart    int64     // cputicks when tracing was started
	ticksEnd      int64     // cputicks when tracing was stopped
	timeStart     int64     // nanotime when tracing was started
	timeEnd       int64     // nanotime when tracing was stopped
	reading       *traceBuf // buffer currently handed off to user
	empty         *traceBuf // stack of empty buffers
	fullHead      *traceBuf // queue of full buffers
	fullTail      *traceBuf
	reader        *g              // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	bufLock mutex     // protects buf
	buf     *traceBuf // global trace buffer, used when running without a p
}

var traceseq uint64 // global trace sequence number

// tracestamp returns a consistent sequence number / timestamp pair
// for use in a trace. We need to make sure that timestamp ordering
// (assuming synchronized CPUs) and sequence ordering match.
// To do that, we increment traceseq, grab ticks, and increment traceseq again.
// We treat an odd traceseq as a sign that another thread is in the middle
// of the sequence and spin until it is done.
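// For example, if writers A and B race, A CASes traceseq from 2k to 2k+1,
// reads cputicks, and stores 2k+2; B spins while traceseq is odd, so B's
// ticks are read strictly after A's, and B's returned sequence number (k+1)
// is larger than A's (k).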
// Not splitting stack to avoid preemption, just in case the call sites
// that used to call xadd64 and cputicks are sensitive to that.
//go:nosplit
func tracestamp() (seq uint64, ts int64) {
	seq = atomicload64(&traceseq)
	for seq&1 != 0 || !cas64(&traceseq, seq, seq+1) {
		seq = atomicload64(&traceseq)
	}
	ts = cputicks()
	atomicstore64(&traceseq, seq+2)
	return seq >> 1, ts
}

// traceBufHeader is the header of a per-P tracing buffer.
type traceBufHeader struct {
	link      *traceBuf               // in trace.empty/full
	lastSeq   uint64                  // sequence number of last event
	lastTicks uint64                  // when we wrote the last event
	buf       []byte                  // trace data, always points to traceBuf.arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is a per-P tracing buffer.
type traceBuf struct {
	traceBufHeader
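	// arr is sized so that the traceBuf as a whole occupies exactly 64 KiB.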
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	trace.seqStart, trace.ticksStart = tracestamp()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true
	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			traceGoCreate(gp, gp.startpc)
		}
		if status == _Gwaiting {
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	_g_.m.startingtrace = false
	trace.enabled = true

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	for _, p := range &allp {
		if p == nil {
			break
		}
		buf := p.tracebuf
		if buf != nil {
			traceFullQueue(buf)
			p.tracebuf = nil
		}
	}
	if trace.buf != nil && len(trace.buf.buf) != 0 {
		buf := trace.buf
		trace.buf = nil
		traceFullQueue(buf)
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms; wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	trace.stackTab.dump()

	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema, false)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range &allp {
		if p == nil {
			break
		}
		if p.tracebuf != nil {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != nil {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != nil || trace.fullTail != nil {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != nil || trace.reader != nil {
		throw("trace: reading after shutdown")
	}
	for trace.empty != nil {
		buf := trace.empty
		trace.empty = buf.link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf), &memstats.other_sys)
	}
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != nil {
		// More than one goroutine reads trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// since tracing can be enabled at runtime on production servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != nil {
		buf.link = trace.empty
		trace.empty = buf
		trace.reading = nil
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
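		// The header is the 16-byte magic string "go 1.5 trace" padded
		// with four zero bytes, identifying the trace format version.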
		return []byte("go 1.5 trace\x00\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == nil && !trace.shutdown {
		trace.reader = getg()
		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != nil {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.buf
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		data = traceAppend(data, 0)
		if timers.gp != nil {
			data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
			data = traceAppend(data, uint64(timers.gp.goid))
			data = traceAppend(data, 0)
		}
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which the race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == nil || (trace.fullHead == nil && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == nil || (trace.fullHead == nil && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader
	trace.reader = nil
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees the trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = nil
	if buf == nil {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into the queue of full buffers.
func traceFullQueue(buf *traceBuf) {
	buf.link = nil
	if trace.fullHead == nil {
		trace.fullHead = buf
	} else {
		trace.fullTail.link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from the queue of full buffers.
func traceFullDequeue() *traceBuf {
	buf := trace.fullHead
	if buf == nil {
		return nil
	}
	trace.fullHead = buf.link
	if trace.fullHead == nil {
		trace.fullTail = nil
	}
	buf.link = nil
	return buf
}

// traceEvent writes a single event to the trace buffer, flushing the buffer if necessary.
// ev is the event type.
// If skip > 0, write the current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}
	buf := *bufp
	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || cap(buf.buf)-len(buf.buf) < maxSize {
		buf = traceFlush(buf)
		*bufp = buf
	}

	seq, ticksraw := tracestamp()
	seqDiff := seq - buf.lastSeq
	ticks := uint64(ticksraw) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if len(buf.buf) == 0 {
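		// A fresh buffer begins with an EvBatch event carrying the P id and
		// absolute seq/ticks values; subsequent events encode only diffs
		// against the previous event, so resetting the diffs to zero below
		// makes this event's values relative to the batch header.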
		data := buf.buf
		data = append(data, traceEvBatch|1<<traceArgCountShift)
		data = traceAppend(data, uint64(pid))
		data = traceAppend(data, seq)
		data = traceAppend(data, ticks)
		buf.buf = data
		seqDiff = 0
		tickDiff = 0
	}
	buf.lastSeq = seq
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for the number of arguments.
	// If the number is >= 3, the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
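	// On-wire layout: one byte ev|narg<<traceArgCountShift, an optional
	// length byte when narg == 3, then the base-128-encoded seq diff, tick
	// diff, explicit args, and (if skip >= 0) the stack id.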
	data := buf.buf
	data = append(data, ev|narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length, assuming that length < 128.
		data = append(data, 0)
		lenp = &data[len(data)-1]
	}
	data = traceAppend(data, seqDiff)
	data = traceAppend(data, tickDiff)
	for _, a := range args {
		data = traceAppend(data, a)
	}
	if skip == 0 {
		data = append(data, 0)
	} else if skip > 0 {
		_g_ := getg()
		gp := mp.curg
		var nstk int
		if gp == _g_ {
			nstk = callers(skip, buf.stk[:])
		} else if gp != nil {
			nstk = gcallers(gp, skip, buf.stk[:])
		}
		if nstk > 0 {
			nstk-- // skip runtime.goexit
		}
		if nstk > 0 && gp.goid == 1 {
			nstk-- // skip runtime.main
		}
		id := trace.stackTab.put(buf.stk[:nstk])
		data = traceAppend(data, uint64(id))
	}
	evSize := len(data) - len(buf.buf)
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in the actual length.
		*lenp = byte(evSize - 2)
	}
	buf.buf = data
	traceReleaseBuffer(pid)
}

// traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp **traceBuf) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto the queue of full buffers and returns an empty buffer.
func traceFlush(buf *traceBuf) *traceBuf {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != nil {
		if &buf.buf[0] != &buf.arr[0] {
			throw("trace buffer overflow")
		}
		traceFullQueue(buf)
	}
	if trace.empty != nil {
		buf = trace.empty
		trace.empty = buf.link
	} else {
		buf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == nil {
			throw("trace: out of memory")
		}
	}
	buf.link = nil
	buf.buf = buf.arr[:0]
	buf.lastTicks = 0
	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceAppend appends v to buf in little-endian base-128 encoding.
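// For example, traceAppend(nil, 300) produces the two bytes 0xAC 0x02:
// the low seven bits (0x2C) with the continuation bit set, followed by
// the remaining bits (0x02).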
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}

// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]*traceStack
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link *traceStack
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

// stack returns the slice of PCs.
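// The cast through a fixed-size array type is safe because callers never
// store more than traceStackSize PCs (ts.n <= traceStackSize).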
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable without the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double-check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create a new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
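// find is safe to call without tab.lock because put fully initializes each
// traceStack before publishing it into its bucket with atomicstorep, and
// entries are never removed until dump resets the whole table.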
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part]; stk != nil; stk = stk.link {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*ptrSize))
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + traceStackSize) * traceBytesPerNumber]byte
	buf := traceFlush(nil)
	for _, stk := range tab.tab {
		for ; stk != nil; stk = stk.link {
			maxSize := 1 + (3+stk.n)*traceBytesPerNumber
			if cap(buf.buf)-len(buf.buf) < maxSize {
				buf = traceFlush(buf)
			}
			// Form the event in the temp buffer; we need to know the actual length.
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			tmpbuf = traceAppend(tmpbuf, uint64(stk.n))
			for _, pc := range stk.stack() {
				tmpbuf = traceAppend(tmpbuf, uint64(pc))
			}
			// Now copy to the buffer.
			data := buf.buf
			data = append(data, traceEvStack|3<<traceArgCountShift)
			data = traceAppend(data, uint64(len(tmpbuf)))
			data = append(data, tmpbuf...)
			buf.buf = data
		}
	}

	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head *traceAllocBlock
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
type traceAllocBlock struct {
	next *traceAllocBlock
	data [64<<10 - ptrSize]byte
}

// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, ptrSize)
	if a.head == nil || a.off+n > uintptr(len(a.head.data)) {
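		// Note: len(a.head.data) is a compile-time constant (data is an
		// array field), so this is safe to evaluate even when a.head is nil.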
		if n > uintptr(len(a.head.data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next = a.head
		a.head = block
		a.off = 0
	}
	p := &a.head.data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != nil {
		block := a.head
		a.head = block.next
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to the trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 4)
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCScanStart() {
	traceEvent(traceEvGCScanStart, -1)
}

func traceGCScanDone() {
	traceEvent(traceEvGCScanDone, -1)
}

func traceGCSweepStart() {
	traceEvent(traceEvGCSweepStart, 1)
}

func traceGCSweepDone() {
	traceEvent(traceEvGCSweepDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(pc))
}

func traceGoStart() {
	traceEvent(traceEvGoStart, -1, uint64(getg().m.curg.goid))
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	traceEvent(traceEvGoPreempt, 1)
}

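// traceGoPark writes a goroutine-block event. If traceEv has the
// traceFutileWakeup flag set, it first emits an EvFutileWakeup event and
// then strips the flag bit from the event type before writing the block
// event itself.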
func traceGoPark(traceEv byte, skip int, gp *g) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	traceEvent(traceEvGoUnblock, skip, uint64(gp.goid))
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(seq uint64, ts int64) {
	if int64(seq)-int64(trace.seqStart) < 0 {
		// The timestamp was obtained during a previous tracing session; ignore it.
		return
	}
	traceEvent(traceEvGoSysExit, -1, uint64(getg().m.curg.goid), seq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	traceEvent(traceEvNextGC, -1, memstats.next_gc)
}