github.com/brownsys/tracing-framework-go@v0.0.0-20161210174012-0542a62412fe/go/darwin_amd64/src/runtime/cpuprof.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// CPU profiling.
// Based on algorithms and data structures used in
// http://code.google.com/p/google-perftools/.
//
// The main difference between this code and the google-perftools
// code is that this code is written to allow copying the profile data
// to an arbitrary io.Writer, while the google-perftools code always
// writes to an operating system file.
//
// The signal handler for the profiling clock tick adds a new stack trace
// to a hash table tracking counts for recent traces. Most clock ticks
// hit in the cache. In the event of a cache miss, an entry must be
// evicted from the hash table, copied to a log that will eventually be
// written as profile data. The google-perftools code flushed the
// log itself during the signal handler. This code cannot do that, because
// the io.Writer might block or need system calls or locks that are not
// safe to use from within the signal handler. Instead, we split the log
// into two halves and let the signal handler fill one half while a goroutine
// is writing out the other half. When the signal handler fills its half, it
// offers to swap with the goroutine. If the writer is not done with its half,
// we lose the stack trace for this clock tick (and record that loss).
// The goroutine interacts with the signal handler by calling getprofile() to
// get the next log piece to write, implicitly handing back the last log
// piece it obtained.
//
// The state of this dance between the signal handler and the goroutine
// is encoded in the cpuProfile.handoff field. If handoff == 0, then the goroutine
// is not using either log half and is waiting (or will soon be waiting) for
// a new piece by sleeping in notetsleepg(&p.wait, -1). If the signal handler
// changes handoff from 0 to non-zero, it must call notewakeup(&p.wait)
// to wake the goroutine. The value indicates the number of entries in the
// log half being handed off. The goroutine leaves the non-zero value in
// place until it has finished processing the log half and then flips the number
// back to zero. Setting the high bit in handoff means that the profiling is over,
// and the goroutine is now in charge of flushing the data left in the hash table
// to the log and returning that data.
//
// The handoff field is manipulated using atomic operations.
// For the most part, the manipulation of handoff is orderly: if handoff == 0
// then the signal handler owns it and can change it to non-zero.
// If handoff != 0 then the goroutine owns it and can change it to zero.
// If that were the end of the story then we would not need to manipulate
// handoff using atomic operations. The operations are needed, however,
// in order to let the log closer set the high bit to indicate "EOF" safely
// in the situation when normally the goroutine "owns" handoff.
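//
// In short, the handoff states are:
//
//	handoff == 0            the signal handler fills log[toggle]; the goroutine
//	                        is asleep (or about to sleep) on p.wait.
//	handoff == n, n > 0     the goroutine owns log[wtoggle][:n]; the signal
//	                        handler keeps filling the other half.
//	handoff & 0x80000000    profiling is over; the goroutine drains the hash
//	                        table itself and then emits the end-of-data marker.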

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

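// Profile table and log sizing: the hash table has numBuckets buckets of
// assoc entries each, the log is split into two halves of logSize/2 words,
// and a single sample records at most maxCPUProfStack PCs.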
const (
	numBuckets      = 1 << 10
	logSize         = 1 << 17
	assoc           = 4
	maxCPUProfStack = 64
)

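// cpuprofEntry is a single hash table slot: one stack trace and the number
// of times it has been seen since the slot was last evicted to the log.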
type cpuprofEntry struct {
	count uintptr
	depth int
	stack [maxCPUProfStack]uintptr
}

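// cpuProfile is the profiler state shared between the profiling signal
// handler (which calls add/addNonGo) and the goroutine draining the data
// through getprofile.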
type cpuProfile struct {
	on     bool    // profiling is on
	wait   note    // goroutine waits here
	count  uintptr // tick count
	evicts uintptr // eviction count
	lost   uintptr // lost ticks that need to be logged

	// Active recent stack traces.
	hash [numBuckets]struct {
		entry [assoc]cpuprofEntry
	}

	// Log of traces evicted from hash.
	// Signal handler has filled log[toggle][:nlog].
	// Goroutine is writing log[1-toggle][:handoff].
	log     [2][logSize / 2]uintptr
	nlog    int
	toggle  int32
	handoff uint32

	// Writer state.
	// Writer maintains its own toggle to avoid races
	// looking at signal handler's toggle.
	wtoggle  uint32
	wholding bool // holding & need to release a log half
	flushing bool // flushing hash table - profile is over
	eodSent  bool // special end-of-data record sent; => flushing
}

var (
	cpuprofLock mutex
	cpuprof     *cpuProfile

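	// eod is the end-of-data marker in the legacy binary profile format:
	// a record with count 0, depth 1, and a single zero PC.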
	eod = [3]uintptr{0, 1, 0}
)

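// setcpuprofilerate switches to the system stack to run setcpuprofilerate_m,
// which does the actual work of changing the profiling clock rate.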
func setcpuprofilerate(hz int32) {
	systemstack(func() {
		setcpuprofilerate_m(hz)
	})
}

// lostProfileData is a no-op function used in profiles
// to mark the number of profiling stack traces that were
// discarded due to slow data writers.
func lostProfileData() {}

// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
// If hz <= 0, SetCPUProfileRate turns off profiling.
// If the profiler is on, the rate cannot be changed without first turning it off.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// SetCPUProfileRate directly.
func SetCPUProfileRate(hz int) {
	// Clamp hz to something reasonable.
	if hz < 0 {
		hz = 0
	}
	if hz > 1000000 {
		hz = 1000000
	}

	lock(&cpuprofLock)
	if hz > 0 {
		if cpuprof == nil {
			cpuprof = (*cpuProfile)(sysAlloc(unsafe.Sizeof(cpuProfile{}), &memstats.other_sys))
			if cpuprof == nil {
				print("runtime: cpu profiling cannot allocate memory\n")
				unlock(&cpuprofLock)
				return
			}
		}
		if cpuprof.on || cpuprof.handoff != 0 {
			print("runtime: cannot set cpu profile rate until previous profile has finished.\n")
			unlock(&cpuprofLock)
			return
		}

		cpuprof.on = true
		// pprof binary header format.
		// https://github.com/gperftools/gperftools/blob/master/src/profiledata.cc#L119
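		// The header is encoded like a sample: count 0, "depth" 3, then three
		// words of data: format version, sampling period in microseconds, and
		// a zero pad word.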
		p := &cpuprof.log[0]
		p[0] = 0                 // count for header
		p[1] = 3                 // depth for header
		p[2] = 0                 // version number
		p[3] = uintptr(1e6 / hz) // period (microseconds)
		p[4] = 0
		cpuprof.nlog = 5
		cpuprof.toggle = 0
		cpuprof.wholding = false
		cpuprof.wtoggle = 0
		cpuprof.flushing = false
		cpuprof.eodSent = false
		noteclear(&cpuprof.wait)

		setcpuprofilerate(int32(hz))
	} else if cpuprof != nil && cpuprof.on {
		setcpuprofilerate(0)
		cpuprof.on = false

		// Now add is not running anymore, and getprofile owns the entire log.
		// Set the high bit in cpuprof.handoff to tell getprofile.
		for {
			n := cpuprof.handoff
			if n&0x80000000 != 0 {
				print("runtime: setcpuprofile(off) twice\n")
			}
			if atomic.Cas(&cpuprof.handoff, n, n|0x80000000) {
				if n == 0 {
					// we did the transition from 0 -> nonzero so we wake getprofile
					notewakeup(&cpuprof.wait)
				}
				break
			}
		}
	}
	unlock(&cpuprofLock)
}

// add adds the stack trace to the profile.
// It is called from signal handlers and other limited environments
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
// of stack. It is allowed to call evict.
//go:nowritebarrierrec
func (p *cpuProfile) add(pc []uintptr) {
	p.addWithFlushlog(pc, p.flushlog)
}

// addWithFlushlog implements add and addNonGo.
// It is called from signal handlers and other limited environments
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
// of stack. It may be called by a signal handler with no g or m.
// It is allowed to call evict, passing the flushlog parameter.
//go:nosplit
//go:nowritebarrierrec
func (p *cpuProfile) addWithFlushlog(pc []uintptr, flushlog func() bool) {
	if len(pc) > maxCPUProfStack {
		pc = pc[:maxCPUProfStack]
	}

	// Compute hash.
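	// The hash folds each PC into h by rotating h left one byte and then
	// adding pc*41, so the result depends on frame order as well as the PCs.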
	h := uintptr(0)
	for _, x := range pc {
		h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1)))
		h += x * 41
	}
	p.count++

	// Add to entry count if already present in table.
	b := &p.hash[h%numBuckets]
Assoc:
	for i := range b.entry {
		e := &b.entry[i]
		if e.depth != len(pc) {
			continue
		}
		for j := range pc {
			if e.stack[j] != pc[j] {
				continue Assoc
			}
		}
		e.count++
		return
	}

	// Evict entry with smallest count.
	var e *cpuprofEntry
	for i := range b.entry {
		if e == nil || b.entry[i].count < e.count {
			e = &b.entry[i]
		}
	}
	if e.count > 0 {
		if !p.evict(e, flushlog) {
			// Could not evict entry. Record lost stack.
			p.lost++
			return
		}
		p.evicts++
	}

	// Reuse the newly evicted entry.
	e.depth = len(pc)
	e.count = 1
	copy(e.stack[:], pc)
}

// evict copies the given entry's data into the log, so that
// the entry can be reused.  evict is called from add, which
// is called from the profiling signal handler, so it must not
// allocate memory or block, and it may be called with no g or m.
// It is safe to call flushlog. evict returns true if the entry was
// copied to the log, false if there was no room available.
//go:nosplit
//go:nowritebarrierrec
func (p *cpuProfile) evict(e *cpuprofEntry, flushlog func() bool) bool {
	d := e.depth
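	// A log record is count, depth, then depth PCs, so it needs depth+2 slots.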
	nslot := d + 2
	log := &p.log[p.toggle]
	if p.nlog+nslot > len(log) {
		if !flushlog() {
			return false
		}
		log = &p.log[p.toggle]
	}

	q := p.nlog
	log[q] = e.count
	q++
	log[q] = uintptr(d)
	q++
	copy(log[q:], e.stack[:d])
	q += d
	p.nlog = q
	e.count = 0
	return true
}

// flushlog tries to flush the current log and switch to the other one.
// flushlog is called from evict, called from add, called from the signal handler,
// so it cannot allocate memory or block. It can try to swap logs with
// the writing goroutine, as explained in the comment at the top of this file.
//go:nowritebarrierrec
func (p *cpuProfile) flushlog() bool {
	if !atomic.Cas(&p.handoff, 0, uint32(p.nlog)) {
		return false
	}
	notewakeup(&p.wait)

	p.toggle = 1 - p.toggle
	log := &p.log[p.toggle]
	q := 0
	if p.lost > 0 {
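		// Start the fresh half with a synthetic record charging the lost
		// samples to lostProfileData, so the loss shows up in the profile.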
		lostPC := funcPC(lostProfileData)
		log[0] = p.lost
		log[1] = 1
		log[2] = lostPC
		q = 3
		p.lost = 0
	}
	p.nlog = q
	return true
}

// addNonGo is like add, but runs on a non-Go thread.
// It can't do anything that might need a g or an m.
// With this entry point, we don't try to flush the log when evicting an
// old entry. Instead, we just drop the stack trace if we're out of space.
//go:nosplit
//go:nowritebarrierrec
func (p *cpuProfile) addNonGo(pc []uintptr) {
	p.addWithFlushlog(pc, func() bool { return false })
}

// getprofile blocks until the next block of profiling data is available
// and returns it as a []byte. It is called from the writing goroutine.
func (p *cpuProfile) getprofile() []byte {
	if p == nil {
		return nil
	}

	if p.wholding {
		// Release previous log to signal handling side.
		// Loop because we are racing against SetCPUProfileRate(0).
		for {
			n := p.handoff
			if n == 0 {
				print("runtime: phase error during cpu profile handoff\n")
				return nil
			}
			if n&0x80000000 != 0 {
				p.wtoggle = 1 - p.wtoggle
				p.wholding = false
				p.flushing = true
				goto Flush
			}
			if atomic.Cas(&p.handoff, n, 0) {
				break
			}
		}
		p.wtoggle = 1 - p.wtoggle
		p.wholding = false
	}

	if p.flushing {
		goto Flush
	}

	if !p.on && p.handoff == 0 {
		return nil
	}

	// Wait for new log.
	notetsleepg(&p.wait, -1)
	noteclear(&p.wait)

	switch n := p.handoff; {
	case n == 0:
		print("runtime: phase error during cpu profile wait\n")
		return nil
	case n == 0x80000000:
		p.flushing = true
		goto Flush
	default:
		n &^= 0x80000000

		// Return new log to caller.
		p.wholding = true

		return uintptrBytes(p.log[p.wtoggle][:n])
	}

	// In flush mode.
	// Add is no longer being called. We own the log.
	// Also, p.handoff is non-zero, so flushlog will return false.
	// Evict the hash table into the log and return it.
Flush:
	for i := range p.hash {
		b := &p.hash[i]
		for j := range b.entry {
			e := &b.entry[j]
			if e.count > 0 && !p.evict(e, p.flushlog) {
				// Filled the log. Stop the loop and return what we've got.
				break Flush
			}
		}
	}

	// Return pending log data.
	if p.nlog > 0 {
		// Note that we're using toggle now, not wtoggle,
		// because we're working on the log directly.
		n := p.nlog
		p.nlog = 0
		return uintptrBytes(p.log[p.toggle][:n])
	}

	// Made it through the table without finding anything to log.
	if !p.eodSent {
		// We may not have space to append this to the partial log buf,
		// so we always return a new slice for the end-of-data marker.
		p.eodSent = true
		return uintptrBytes(eod[:])
	}

	// Finally done. Clean up and return nil.
	p.flushing = false
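	// The Cas is effectively an atomic store of 0; it only fails if something
	// else changed handoff concurrently, which would be a protocol violation.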
	if !atomic.Cas(&p.handoff, p.handoff, 0) {
		print("runtime: profile flush racing with something\n")
	}
	return nil
}

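// uintptrBytes reinterprets the memory backing a []uintptr as a []byte of
// len(p)*sizeof(uintptr) bytes, without copying, by rewriting the slice header.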
func uintptrBytes(p []uintptr) (ret []byte) {
	pp := (*slice)(unsafe.Pointer(&p))
	rp := (*slice)(unsafe.Pointer(&ret))

	rp.array = pp.array
	rp.len = pp.len * int(unsafe.Sizeof(p[0]))
	rp.cap = rp.len

	return
}

// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
// blocking until data is available. If profiling is turned off and all the profile
// data accumulated while it was on has been returned, CPUProfile returns nil.
// The caller must save the returned data before calling CPUProfile again.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// CPUProfile directly.
func CPUProfile() []byte {
	return cpuprof.getprofile()
}
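
// A minimal sketch of a writer loop built on CPUProfile (assuming some
// caller-supplied io.Writer w); runtime/pprof drives it in essentially this
// way:
//
//	for {
//		data := CPUProfile()
//		if data == nil {
//			break // profiling stopped and all data has been returned
//		}
//		w.Write(data)
//	}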
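// runtime_pprof_runtime_cyclesPerSecond exposes the runtime's estimate of CPU
// ticks per second to runtime/pprof via go:linkname.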
//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
func runtime_pprof_runtime_cyclesPerSecond() int64 {
	return tickspersecond()
}