github.com/muesli/go@v0.0.0-20170208044820-e410d2a81ef2/src/runtime/pprof/pprof.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Package pprof writes runtime profiling data in the format expected
     6  // by the pprof visualization tool.
     7  //
     8  // Profiling a Go program
     9  //
    10  // The first step to profiling a Go program is to enable profiling.
    11  // Support for profiling benchmarks built with the standard testing
    12  // package is built into go test. For example, the following command
    13  // runs benchmarks in the current directory and writes the CPU and
    14  // memory profiles to cpu.prof and mem.prof:
    15  //
    16  //     go test -cpuprofile cpu.prof -memprofile mem.prof -bench .
    17  //
    18  // To add equivalent profiling support to a standalone program, add
    19  // code like the following to your main function:
    20  //
    21  //    var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
    22  //    var memprofile = flag.String("memprofile", "", "write memory profile to `file`")
    23  //
    24  //    func main() {
    25  //        flag.Parse()
    26  //        if *cpuprofile != "" {
    27  //            f, err := os.Create(*cpuprofile)
    28  //            if err != nil {
    29  //                log.Fatal("could not create CPU profile: ", err)
    30  //            }
    31  //            if err := pprof.StartCPUProfile(f); err != nil {
    32  //                log.Fatal("could not start CPU profile: ", err)
    33  //            }
    34  //            defer pprof.StopCPUProfile()
    35  //        }
    36  //
    37  //        // ... rest of the program ...
    38  //
    39  //        if *memprofile != "" {
    40  //            f, err := os.Create(*memprofile)
    41  //            if err != nil {
    42  //                log.Fatal("could not create memory profile: ", err)
    43  //            }
    44  //            runtime.GC() // get up-to-date statistics
    45  //            if err := pprof.WriteHeapProfile(f); err != nil {
    46  //                log.Fatal("could not write memory profile: ", err)
    47  //            }
    48  //            f.Close()
    49  //        }
    50  //    }
    51  //
    52  // There is also a standard HTTP interface to profiling data. Adding
    53  // the following line will install handlers under the /debug/pprof/
    54  // URL to download live profiles:
    55  //
    56  //    import _ "net/http/pprof"
    57  //
    58  // See the net/http/pprof package for more details.
    59  //
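        // For the handlers to be reachable, the program must also be serving
        // HTTP. A minimal sketch, assuming the conventional localhost:6060
        // address (the address and error handling are illustrative only):
        //
        //    go func() {
        //        log.Println(http.ListenAndServe("localhost:6060", nil))
        //    }()
        //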
    60  // Profiles can then be visualized with the pprof tool:
    61  //
    62  //    go tool pprof cpu.prof
    63  //
    64  // There are many commands available from the pprof command line.
    65  // Commonly used commands include "top", which prints a summary of the
    66  // top program hot-spots, and "web", which opens an interactive graph
    67  // of hot-spots and their call graphs. Use "help" for information on
    68  // all pprof commands.
    69  //
    70  // For more information about pprof, see
    71  // https://github.com/google/pprof/blob/master/doc/pprof.md.
    72  package pprof
    73  
    74  import (
    75  	"bufio"
    76  	"bytes"
    77  	"fmt"
    78  	"internal/pprof/profile"
    79  	"io"
    80  	"runtime"
    81  	"runtime/pprof/internal/protopprof"
    82  	"sort"
    83  	"strings"
    84  	"sync"
    85  	"text/tabwriter"
    86  	"time"
    87  )
    88  
    89  // BUG(rsc): Profiles are only as good as the kernel support used to generate them.
    90  // See https://golang.org/issue/13841 for details about known problems.
    91  
    92  // A Profile is a collection of stack traces showing the call sequences
    93  // that led to instances of a particular event, such as allocation.
    94  // Packages can create and maintain their own profiles; the most common
    95  // use is for tracking resources that must be explicitly closed, such as files
    96  // or network connections.
    97  //
    98  // A Profile's methods can be called from multiple goroutines simultaneously.
    99  //
   100  // Each Profile has a unique name. A few profiles are predefined:
   101  //
   102  //	goroutine    - stack traces of all current goroutines
   103  //	heap         - a sampling of all heap allocations
   104  //	threadcreate - stack traces that led to the creation of new OS threads
   105  //	block        - stack traces that led to blocking on synchronization primitives
   106  //	mutex        - stack traces of holders of contended mutexes
   107  //
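        // The block and mutex profiles collect no samples until the program
        // opts in. A minimal sketch of enabling them (the rates shown are
        // illustrative only):
        //
        //	runtime.SetBlockProfileRate(1)     // record every blocking event
        //	runtime.SetMutexProfileFraction(5) // record ~1 in 5 contention events
        //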
   108  // These predefined profiles maintain themselves and panic on an explicit
   109  // Add or Remove method call.
   110  //
   111  // The heap profile reports statistics as of the most recently completed
   112  // garbage collection; it elides more recent allocation to avoid skewing
   113  // the profile away from live data and toward garbage.
   114  // If there has been no garbage collection at all, the heap profile reports
   115  // all known allocations. This exception helps mainly in programs running
   116  // without garbage collection enabled, usually for debugging purposes.
   117  //
   118  // The CPU profile is not available as a Profile. It has a special API,
   119  // the StartCPUProfile and StopCPUProfile functions, because it streams
   120  // output to a writer during profiling.
   121  //
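        // As a hedged sketch of a package-maintained custom profile (the
        // profile name "mypkg.conns" and the identifiers openConns, track and
        // untrack are illustrative only, not part of this package):
        //
        //	var openConns = pprof.NewProfile("mypkg.conns")
        //
        //	func track(c net.Conn) {
        //		openConns.Add(c, 1) // skip=1: begin the trace at track's caller
        //	}
        //
        //	func untrack(c net.Conn) {
        //		openConns.Remove(c)
        //	}
        //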
   122  type Profile struct {
   123  	name  string
   124  	mu    sync.Mutex
   125  	m     map[interface{}][]uintptr
   126  	count func() int
   127  	write func(io.Writer, int) error
   128  }
   129  
   130  // profiles records all registered profiles.
   131  var profiles struct {
   132  	mu sync.Mutex
   133  	m  map[string]*Profile
   134  }
   135  
   136  var goroutineProfile = &Profile{
   137  	name:  "goroutine",
   138  	count: countGoroutine,
   139  	write: writeGoroutine,
   140  }
   141  
   142  var threadcreateProfile = &Profile{
   143  	name:  "threadcreate",
   144  	count: countThreadCreate,
   145  	write: writeThreadCreate,
   146  }
   147  
   148  var heapProfile = &Profile{
   149  	name:  "heap",
   150  	count: countHeap,
   151  	write: writeHeap,
   152  }
   153  
   154  var blockProfile = &Profile{
   155  	name:  "block",
   156  	count: countBlock,
   157  	write: writeBlock,
   158  }
   159  
   160  var mutexProfile = &Profile{
   161  	name:  "mutex",
   162  	count: countMutex,
   163  	write: writeMutex,
   164  }
   165  
   166  func lockProfiles() {
   167  	profiles.mu.Lock()
   168  	if profiles.m == nil {
   169  		// Initial built-in profiles.
   170  		profiles.m = map[string]*Profile{
   171  			"goroutine":    goroutineProfile,
   172  			"threadcreate": threadcreateProfile,
   173  			"heap":         heapProfile,
   174  			"block":        blockProfile,
   175  			"mutex":        mutexProfile,
   176  		}
   177  	}
   178  }
   179  
   180  func unlockProfiles() {
   181  	profiles.mu.Unlock()
   182  }
   183  
   184  // NewProfile creates a new profile with the given name.
   185  // If a profile with that name already exists, NewProfile panics.
   186  // The convention is to use an 'import/path.' prefix to create
   187  // separate name spaces for each package.
   188  // For compatibility with various tools that read pprof data,
   189  // profile names should not contain spaces.
   190  func NewProfile(name string) *Profile {
   191  	lockProfiles()
   192  	defer unlockProfiles()
   193  	if name == "" {
   194  		panic("pprof: NewProfile with empty name")
   195  	}
   196  	if profiles.m[name] != nil {
   197  		panic("pprof: NewProfile name already in use: " + name)
   198  	}
   199  	p := &Profile{
   200  		name: name,
   201  		m:    map[interface{}][]uintptr{},
   202  	}
   203  	profiles.m[name] = p
   204  	return p
   205  }
   206  
   207  // Lookup returns the profile with the given name, or nil if no such profile exists.
   208  func Lookup(name string) *Profile {
   209  	lockProfiles()
   210  	defer unlockProfiles()
   211  	return profiles.m[name]
   212  }
   213  
   214  // Profiles returns a slice of all the known profiles, sorted by name.
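        // For example (a hedged sketch), listing every registered profile and
        // its current sample count:
        //
        //	for _, p := range pprof.Profiles() {
        //		fmt.Printf("%s: %d\n", p.Name(), p.Count())
        //	}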
   215  func Profiles() []*Profile {
   216  	lockProfiles()
   217  	defer unlockProfiles()
   218  
   219  	all := make([]*Profile, 0, len(profiles.m))
   220  	for _, p := range profiles.m {
   221  		all = append(all, p)
   222  	}
   223  
   224  	sort.Slice(all, func(i, j int) bool { return all[i].name < all[j].name })
   225  	return all
   226  }
   227  
   228  // Name returns this profile's name, which can be passed to Lookup to reobtain the profile.
   229  func (p *Profile) Name() string {
   230  	return p.name
   231  }
   232  
   233  // Count returns the number of execution stacks currently in the profile.
   234  func (p *Profile) Count() int {
   235  	p.mu.Lock()
   236  	defer p.mu.Unlock()
   237  	if p.count != nil {
   238  		return p.count()
   239  	}
   240  	return len(p.m)
   241  }
   242  
   243  // Add adds the current execution stack to the profile, associated with value.
   244  // Add stores value in an internal map, so value must be suitable for use as
   245  // a map key and will not be garbage collected until the corresponding
   246  // call to Remove. Add panics if the profile already contains a stack for value.
   247  //
   248  // The skip parameter has the same meaning as runtime.Caller's skip
   249  // and controls where the stack trace begins. Passing skip=0 begins the
   250  // trace in the function calling Add. For example, given this
   251  // execution stack:
   252  //
   253  //	Add
   254  //	called from rpc.NewClient
   255  //	called from mypkg.Run
   256  //	called from main.main
   257  //
   258  // Passing skip=0 begins the stack trace at the call to Add inside rpc.NewClient.
   259  // Passing skip=1 begins the stack trace at the call to NewClient inside mypkg.Run.
   260  //
   261  func (p *Profile) Add(value interface{}, skip int) {
   262  	if p.name == "" {
   263  		panic("pprof: use of uninitialized Profile")
   264  	}
   265  	if p.write != nil {
   266  		panic("pprof: Add called on built-in Profile " + p.name)
   267  	}
   268  
   269  	stk := make([]uintptr, 32)
   270  	n := runtime.Callers(skip+1, stk[:])
   271  
   272  	p.mu.Lock()
   273  	defer p.mu.Unlock()
   274  	if p.m[value] != nil {
   275  		panic("pprof: Profile.Add of duplicate value")
   276  	}
   277  	p.m[value] = stk[:n]
   278  }
   279  
   280  // Remove removes the execution stack associated with value from the profile.
   281  // It is a no-op if the value is not in the profile.
   282  func (p *Profile) Remove(value interface{}) {
   283  	p.mu.Lock()
   284  	defer p.mu.Unlock()
   285  	delete(p.m, value)
   286  }
   287  
   288  // WriteTo writes a pprof-formatted snapshot of the profile to w.
   289  // If a write to w returns an error, WriteTo returns that error.
   290  // Otherwise, WriteTo returns nil.
   291  //
   292  // The debug parameter enables additional output.
   293  // Passing debug=0 writes the compressed protocol buffer format that the pprof tool expects.
   294  // Passing debug=1 adds comments translating addresses to function names
   295  // and line numbers, so that a programmer can read the profile without tools.
   296  //
   297  // The predefined profiles may assign meaning to other debug values;
   298  // for example, when printing the "goroutine" profile, debug=2 means to
   299  // print the goroutine stacks in the same form that a Go program uses
   300  // when dying due to an unrecovered panic.
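        //
        // For example (a hedged sketch), writing the goroutine profile in its
        // human-readable form to standard error:
        //
        //	if err := pprof.Lookup("goroutine").WriteTo(os.Stderr, 1); err != nil {
        //		log.Print(err)
        //	}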
   301  func (p *Profile) WriteTo(w io.Writer, debug int) error {
   302  	if p.name == "" {
   303  		panic("pprof: use of zero Profile")
   304  	}
   305  	if p.write != nil {
   306  		return p.write(w, debug)
   307  	}
   308  
   309  	// Obtain consistent snapshot under lock; then process without lock.
   310  	all := make([][]uintptr, 0, len(p.m))
   311  	p.mu.Lock()
   312  	for _, stk := range p.m {
   313  		all = append(all, stk)
   314  	}
   315  	p.mu.Unlock()
   316  
   317  	// Map order is non-deterministic; make output deterministic.
   318  	sort.Sort(stackProfile(all))
   319  
   320  	return printCountProfile(w, debug, p.name, stackProfile(all))
   321  }
   322  
   323  type stackProfile [][]uintptr
   324  
   325  func (x stackProfile) Len() int              { return len(x) }
   326  func (x stackProfile) Stack(i int) []uintptr { return x[i] }
   327  func (x stackProfile) Swap(i, j int)         { x[i], x[j] = x[j], x[i] }
   328  func (x stackProfile) Less(i, j int) bool {
   329  	t, u := x[i], x[j]
   330  	for k := 0; k < len(t) && k < len(u); k++ {
   331  		if t[k] != u[k] {
   332  			return t[k] < u[k]
   333  		}
   334  	}
   335  	return len(t) < len(u)
   336  }
   337  
   338  // A countProfile is a set of stack traces to be printed as counts
   339  // grouped by stack trace. There are multiple implementations:
   340  // all that matters is that we can find out how many traces there are
   341  // and obtain each trace in turn.
   342  type countProfile interface {
   343  	Len() int
   344  	Stack(i int) []uintptr
   345  }
   346  
   347  // printCountProfile prints a countProfile at the specified debug level.
   348  // The profile will be in compressed proto format unless debug is nonzero.
   349  func printCountProfile(w io.Writer, debug int, name string, p countProfile) error {
   350  	// Build count of each stack.
   351  	var buf bytes.Buffer
   352  	key := func(stk []uintptr) string {
   353  		buf.Reset()
   354  		fmt.Fprintf(&buf, "@")
   355  		for _, pc := range stk {
   356  			fmt.Fprintf(&buf, " %#x", pc)
   357  		}
   358  		return buf.String()
   359  	}
   360  	count := map[string]int{}
   361  	index := map[string]int{}
   362  	var keys []string
   363  	n := p.Len()
   364  	for i := 0; i < n; i++ {
   365  		k := key(p.Stack(i))
   366  		if count[k] == 0 {
   367  			index[k] = i
   368  			keys = append(keys, k)
   369  		}
   370  		count[k]++
   371  	}
   372  
   373  	sort.Sort(&keysByCount{keys, count})
   374  
   375  	if debug > 0 {
   376  		// Print debug profile in legacy format
   377  		tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
   378  		fmt.Fprintf(tw, "%s profile: total %d\n", name, p.Len())
   379  		for _, k := range keys {
   380  			fmt.Fprintf(tw, "%d %s\n", count[k], k)
   381  			printStackRecord(tw, p.Stack(index[k]), false)
   382  		}
   383  		return tw.Flush()
   384  	}
   385  
   386  	// Output profile in protobuf form.
   387  	prof := &profile.Profile{
   388  		PeriodType: &profile.ValueType{Type: name, Unit: "count"},
   389  		Period:     1,
   390  		Sample:     make([]*profile.Sample, 0, len(keys)),
   391  		SampleType: []*profile.ValueType{{Type: name, Unit: "count"}},
   392  	}
   393  	locMap := make(map[uintptr]*profile.Location)
   394  	for _, k := range keys {
   395  		stk := p.Stack(index[k])
   396  		c := count[k]
   397  		locs := make([]*profile.Location, len(stk))
   398  		for i, addr := range stk {
   399  			loc := locMap[addr]
   400  			if loc == nil {
   401  				loc = &profile.Location{
   402  					ID:      uint64(len(locMap) + 1),
   403  					Address: uint64(addr - 1),
   404  				}
   405  				prof.Location = append(prof.Location, loc)
   406  				locMap[addr] = loc
   407  			}
   408  			locs[i] = loc
   409  		}
   410  		prof.Sample = append(prof.Sample, &profile.Sample{
   411  			Location: locs,
   412  			Value:    []int64{int64(c)},
   413  		})
   414  	}
   415  	return prof.Write(w)
   416  }
   417  
   418  // keysByCount sorts keys with higher counts first, breaking ties by key string order.
   419  type keysByCount struct {
   420  	keys  []string
   421  	count map[string]int
   422  }
   423  
   424  func (x *keysByCount) Len() int      { return len(x.keys) }
   425  func (x *keysByCount) Swap(i, j int) { x.keys[i], x.keys[j] = x.keys[j], x.keys[i] }
   426  func (x *keysByCount) Less(i, j int) bool {
   427  	ki, kj := x.keys[i], x.keys[j]
   428  	ci, cj := x.count[ki], x.count[kj]
   429  	if ci != cj {
   430  		return ci > cj
   431  	}
   432  	return ki < kj
   433  }
   434  
   435  // printStackRecord prints the function + source line information
   436  // for a single stack trace.
   437  func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
   438  	show := allFrames
   439  	frames := runtime.CallersFrames(stk)
   440  	for {
   441  		frame, more := frames.Next()
   442  		name := frame.Function
   443  		if name == "" {
   444  			show = true
   445  			fmt.Fprintf(w, "#\t%#x\n", frame.PC)
   446  		} else if name != "runtime.goexit" && (show || !strings.HasPrefix(name, "runtime.")) {
   447  			// Hide runtime.goexit and any runtime functions at the beginning.
   448  			// This is useful mainly for allocation traces.
   449  			show = true
   450  			fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", frame.PC, name, frame.PC-frame.Entry, frame.File, frame.Line)
   451  		}
   452  		if !more {
   453  			break
   454  		}
   455  	}
   456  	if !show {
   457  		// We didn't print anything; do it again,
   458  		// and this time include runtime functions.
   459  		printStackRecord(w, stk, true)
   460  		return
   461  	}
   462  	fmt.Fprintf(w, "\n")
   463  }
   464  
   465  // Interface to system profiles.
   466  
   467  // WriteHeapProfile is shorthand for Lookup("heap").WriteTo(w, 0).
   468  // It is preserved for backwards compatibility.
   469  func WriteHeapProfile(w io.Writer) error {
   470  	return writeHeap(w, 0)
   471  }
   472  
   473  // countHeap returns the number of records in the heap profile.
   474  func countHeap() int {
   475  	n, _ := runtime.MemProfile(nil, true)
   476  	return n
   477  }
   478  
   479  // writeHeap writes the current runtime heap profile to w.
   480  func writeHeap(w io.Writer, debug int) error {
   481  	// Find out how many records there are (MemProfile(nil, true)),
   482  	// allocate that many records, and get the data.
   483  	// There's a race—more records might be added between
   484  	// the two calls—so allocate a few extra records for safety
   485  	// and also try again if we're very unlucky.
   486  	// The loop should only execute one iteration in the common case.
   487  	var p []runtime.MemProfileRecord
   488  	n, ok := runtime.MemProfile(nil, true)
   489  	for {
   490  		// Allocate room for a slightly bigger profile,
   491  		// in case a few more entries have been added
   492  		// since the call to MemProfile.
   493  		p = make([]runtime.MemProfileRecord, n+50)
   494  		n, ok = runtime.MemProfile(p, true)
   495  		if ok {
   496  			p = p[0:n]
   497  			break
   498  		}
   499  		// Profile grew; try again.
   500  	}
   501  
   502  	if debug == 0 {
   503  		pp := protopprof.EncodeMemProfile(p, int64(runtime.MemProfileRate), time.Now())
   504  		return pp.Write(w)
   505  	}
   506  
   507  	sort.Slice(p, func(i, j int) bool { return p[i].InUseBytes() > p[j].InUseBytes() })
   508  
   509  	b := bufio.NewWriter(w)
   510  	tw := tabwriter.NewWriter(b, 1, 8, 1, '\t', 0)
   511  	w = tw
   512  
   513  	var total runtime.MemProfileRecord
   514  	for i := range p {
   515  		r := &p[i]
   516  		total.AllocBytes += r.AllocBytes
   517  		total.AllocObjects += r.AllocObjects
   518  		total.FreeBytes += r.FreeBytes
   519  		total.FreeObjects += r.FreeObjects
   520  	}
   521  
   522  	// Technically the rate is MemProfileRate not 2*MemProfileRate,
   523  	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
   524  	// so that's what pprof has come to expect.
   525  	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
   526  		total.InUseObjects(), total.InUseBytes(),
   527  		total.AllocObjects, total.AllocBytes,
   528  		2*runtime.MemProfileRate)
   529  
   530  	for i := range p {
   531  		r := &p[i]
   532  		fmt.Fprintf(w, "%d: %d [%d: %d] @",
   533  			r.InUseObjects(), r.InUseBytes(),
   534  			r.AllocObjects, r.AllocBytes)
   535  		for _, pc := range r.Stack() {
   536  			fmt.Fprintf(w, " %#x", pc)
   537  		}
   538  		fmt.Fprintf(w, "\n")
   539  		printStackRecord(w, r.Stack(), false)
   540  	}
   541  
   542  	// Print memstats information too.
   543  // Pprof will ignore it, but it's useful for people.
   544  	s := new(runtime.MemStats)
   545  	runtime.ReadMemStats(s)
   546  	fmt.Fprintf(w, "\n# runtime.MemStats\n")
   547  	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
   548  	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
   549  	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
   550  	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
   551  	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
   552  	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)
   553  
   554  	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
   555  	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
   556  	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
   557  	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
   558  	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
   559  	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)
   560  
   561  	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
   562  	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
   563  	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
   564  	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
   565  	fmt.Fprintf(w, "# GCSys = %d\n", s.GCSys)
   566  	fmt.Fprintf(w, "# OtherSys = %d\n", s.OtherSys)
   567  
   568  	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
   569  	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
   570  	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
   571  	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
   572  
   573  	tw.Flush()
   574  	return b.Flush()
   575  }
   576  
   577  // countThreadCreate returns the size of the current ThreadCreateProfile.
   578  func countThreadCreate() int {
   579  	n, _ := runtime.ThreadCreateProfile(nil)
   580  	return n
   581  }
   582  
   583  // writeThreadCreate writes the current runtime ThreadCreateProfile to w.
   584  func writeThreadCreate(w io.Writer, debug int) error {
   585  	return writeRuntimeProfile(w, debug, "threadcreate", runtime.ThreadCreateProfile)
   586  }
   587  
   588  // countGoroutine returns the number of goroutines.
   589  func countGoroutine() int {
   590  	return runtime.NumGoroutine()
   591  }
   592  
   593  // writeGoroutine writes the current runtime GoroutineProfile to w.
   594  func writeGoroutine(w io.Writer, debug int) error {
   595  	if debug >= 2 {
   596  		return writeGoroutineStacks(w)
   597  	}
   598  	return writeRuntimeProfile(w, debug, "goroutine", runtime.GoroutineProfile)
   599  }
   600  
   601  func writeGoroutineStacks(w io.Writer) error {
   602  	// We don't know how big the buffer needs to be to collect
   603  	// all the goroutines. Start with 1 MB and try a few times, doubling each time.
   604  	// Give up and use a truncated trace if 64 MB is not enough.
   605  	buf := make([]byte, 1<<20)
   606  	for i := 0; ; i++ {
   607  		n := runtime.Stack(buf, true)
   608  		if n < len(buf) {
   609  			buf = buf[:n]
   610  			break
   611  		}
   612  		if len(buf) >= 64<<20 {
   613  			// Filled 64 MB - stop there.
   614  			break
   615  		}
   616  		buf = make([]byte, 2*len(buf))
   617  	}
   618  	_, err := w.Write(buf)
   619  	return err
   620  }
   621  
   622  func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]runtime.StackRecord) (int, bool)) error {
   623  	// Find out how many records there are (fetch(nil)),
   624  	// allocate that many records, and get the data.
   625  	// There's a race—more records might be added between
   626  	// the two calls—so allocate a few extra records for safety
   627  	// and also try again if we're very unlucky.
   628  	// The loop should only execute one iteration in the common case.
   629  	var p []runtime.StackRecord
   630  	n, ok := fetch(nil)
   631  	for {
   632  		// Allocate room for a slightly bigger profile,
   633  		// in case a few more entries have been added
   634  	// since the previous call to fetch.
   635  		p = make([]runtime.StackRecord, n+10)
   636  		n, ok = fetch(p)
   637  		if ok {
   638  			p = p[0:n]
   639  			break
   640  		}
   641  		// Profile grew; try again.
   642  	}
   643  
   644  	return printCountProfile(w, debug, name, runtimeProfile(p))
   645  }
   646  
   647  type runtimeProfile []runtime.StackRecord
   648  
   649  func (p runtimeProfile) Len() int              { return len(p) }
   650  func (p runtimeProfile) Stack(i int) []uintptr { return p[i].Stack() }
   651  
   652  var cpu struct {
   653  	sync.Mutex
   654  	profiling bool
   655  	done      chan bool
   656  }
   657  
   658  // StartCPUProfile enables CPU profiling for the current process.
   659  // While profiling, the profile will be buffered and written to w.
   660  // StartCPUProfile returns an error if profiling is already enabled.
   661  //
   662  // On Unix-like systems, StartCPUProfile does not work by default for
   663  // Go code built with -buildmode=c-archive or -buildmode=c-shared.
   664  // StartCPUProfile relies on the SIGPROF signal, but that signal will
   665  // be delivered to the main program's SIGPROF signal handler (if any)
   666  // not to the one used by Go. To make it work, call os/signal.Notify
   667  // for syscall.SIGPROF, but note that doing so may break any profiling
   668  // being done by the main program.
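        //
        // A hedged sketch of that workaround (the channel buffer size is
        // illustrative only):
        //
        //	c := make(chan os.Signal, 1)
        //	signal.Notify(c, syscall.SIGPROF)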
   669  func StartCPUProfile(w io.Writer) error {
   670  	// The runtime routines allow a variable profiling rate,
   671  	// but in practice operating systems cannot trigger signals
   672  	// at more than about 500 Hz, and our processing of the
   673  	// signal is not cheap (mostly getting the stack trace).
   674  	// 100 Hz is a reasonable choice: it is frequent enough to
   675  	// produce useful data, rare enough not to bog down the
   676  	// system, and a nice round number to make it easy to
   677  	// convert sample counts to seconds. Instead of requiring
   678  	// each client to specify the frequency, we hard code it.
   679  	const hz = 100
   680  
   681  	cpu.Lock()
   682  	defer cpu.Unlock()
   683  	if cpu.done == nil {
   684  		cpu.done = make(chan bool)
   685  	}
   686  	// Double-check.
   687  	if cpu.profiling {
   688  		return fmt.Errorf("cpu profiling already in use")
   689  	}
   690  	cpu.profiling = true
   691  	runtime.SetCPUProfileRate(hz)
   692  	go profileWriter(w)
   693  	return nil
   694  }
   695  
   696  func profileWriter(w io.Writer) {
   697  	startTime := time.Now()
   698  	// This will buffer the entire profile into buf and then
   699  	// translate it into a profile.Profile structure. This will
   700  	// create two copies of all the data in the profile in memory.
   701  	// TODO(matloob): Convert each chunk of the proto output and
   702  	// stream it out instead of converting the entire profile.
   703  	var buf bytes.Buffer
   704  	for {
   705  		data := runtime.CPUProfile()
   706  		if data == nil {
   707  			break
   708  		}
   709  		buf.Write(data)
   710  	}
   711  
   712  	profile, err := protopprof.TranslateCPUProfile(buf.Bytes(), startTime)
   713  	if err != nil {
   714  		// The runtime should never produce an invalid or truncated profile.
   715  		// It drops records that can't fit into its log buffers.
   716  		panic(fmt.Errorf("could not translate binary profile to proto format: %v", err))
   717  	}
   718  
   719  	profile.Write(w)
   720  	cpu.done <- true
   721  }
   722  
   723  // StopCPUProfile stops the current CPU profile, if any.
   724  // StopCPUProfile only returns after all the writes for the
   725  // profile have completed.
   726  func StopCPUProfile() {
   727  	cpu.Lock()
   728  	defer cpu.Unlock()
   729  
   730  	if !cpu.profiling {
   731  		return
   732  	}
   733  	cpu.profiling = false
   734  	runtime.SetCPUProfileRate(0)
   735  	<-cpu.done
   736  }
   737  
   738  // countBlock returns the number of records in the blocking profile.
   739  func countBlock() int {
   740  	n, _ := runtime.BlockProfile(nil)
   741  	return n
   742  }
   743  
   744  // countMutex returns the number of records in the mutex profile.
   745  func countMutex() int {
   746  	n, _ := runtime.MutexProfile(nil)
   747  	return n
   748  }
   749  
   750  // writeBlock writes the current blocking profile to w.
   751  func writeBlock(w io.Writer, debug int) error {
   752  	var p []runtime.BlockProfileRecord
   753  	n, ok := runtime.BlockProfile(nil)
   754  	for {
   755  		p = make([]runtime.BlockProfileRecord, n+50)
   756  		n, ok = runtime.BlockProfile(p)
   757  		if ok {
   758  			p = p[:n]
   759  			break
   760  		}
   761  	}
   762  
   763  	sort.Slice(p, func(i, j int) bool { return p[i].Cycles > p[j].Cycles })
   764  
   765  	b := bufio.NewWriter(w)
   766  	var tw *tabwriter.Writer
   767  	w = b
   768  	if debug > 0 {
   769  		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
   770  		w = tw
   771  	}
   772  
   773  	fmt.Fprintf(w, "--- contention:\n")
   774  	fmt.Fprintf(w, "cycles/second=%v\n", runtime_cyclesPerSecond())
   775  	for i := range p {
   776  		r := &p[i]
   777  		fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
   778  		for _, pc := range r.Stack() {
   779  			fmt.Fprintf(w, " %#x", pc)
   780  		}
   781  		fmt.Fprint(w, "\n")
   782  		if debug > 0 {
   783  			printStackRecord(w, r.Stack(), true)
   784  		}
   785  	}
   786  
   787  	if tw != nil {
   788  		tw.Flush()
   789  	}
   790  	return b.Flush()
   791  }
   792  
   793  // writeMutex writes the current mutex profile to w.
   794  func writeMutex(w io.Writer, debug int) error {
   795  	// TODO(pjw): too much common code with writeBlock. FIX!
   796  	var p []runtime.BlockProfileRecord
   797  	n, ok := runtime.MutexProfile(nil)
   798  	for {
   799  		p = make([]runtime.BlockProfileRecord, n+50)
   800  		n, ok = runtime.MutexProfile(p)
   801  		if ok {
   802  			p = p[:n]
   803  			break
   804  		}
   805  	}
   806  
   807  	sort.Slice(p, func(i, j int) bool { return p[i].Cycles > p[j].Cycles })
   808  
   809  	b := bufio.NewWriter(w)
   810  	var tw *tabwriter.Writer
   811  	w = b
   812  	if debug > 0 {
   813  		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
   814  		w = tw
   815  	}
   816  
   817  	fmt.Fprintf(w, "--- mutex:\n")
   818  	fmt.Fprintf(w, "cycles/second=%v\n", runtime_cyclesPerSecond())
   819  	fmt.Fprintf(w, "sampling period=%d\n", runtime.SetMutexProfileFraction(-1))
   820  	for i := range p {
   821  		r := &p[i]
   822  		fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
   823  		for _, pc := range r.Stack() {
   824  			fmt.Fprintf(w, " %#x", pc)
   825  		}
   826  		fmt.Fprint(w, "\n")
   827  		if debug > 0 {
   828  			printStackRecord(w, r.Stack(), true)
   829  		}
   830  	}
   831  
   832  	if tw != nil {
   833  		tw.Flush()
   834  	}
   835  	return b.Flush()
   836  }
   837  
   838  func runtime_cyclesPerSecond() int64