github.com/mh-cbon/go@v0.0.0-20160603070303-9e112a3fe4c0/src/runtime/pprof/pprof.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Package pprof writes runtime profiling data in the format expected
     6  // by the pprof visualization tool.
     7  // For more information about pprof, see
     8  // http://code.google.com/p/google-perftools/.
     9  package pprof
    10  
    11  import (
    12  	"bufio"
    13  	"bytes"
    14  	"fmt"
    15  	"io"
    16  	"os"
    17  	"runtime"
    18  	"sort"
    19  	"strings"
    20  	"sync"
    21  	"text/tabwriter"
    22  )
    23  
    24  // BUG(rsc): Profiles are only as good as the kernel support used to generate them.
    25  // See https://golang.org/issue/13841 for details about known problems.
    26  
    27  // A Profile is a collection of stack traces showing the call sequences
    28  // that led to instances of a particular event, such as allocation.
    29  // Packages can create and maintain their own profiles; the most common
    30  // use is for tracking resources that must be explicitly closed, such as files
    31  // or network connections.
    32  //
    33  // A Profile's methods can be called from multiple goroutines simultaneously.
    34  //
    35  // Each Profile has a unique name. A few profiles are predefined:
    36  //
    37  //	goroutine    - stack traces of all current goroutines
    38  //	heap         - a sampling of all heap allocations
    39  //	threadcreate - stack traces that led to the creation of new OS threads
    40  //	block        - stack traces that led to blocking on synchronization primitives
    41  //
    42  // These predefined profiles maintain themselves and panic on an explicit
    43  // Add or Remove method call.
    44  //
    45  // The heap profile reports statistics as of the most recently completed
    46  // garbage collection; it elides more recent allocation to avoid skewing
    47  // the profile away from live data and toward garbage.
    48  // If there has been no garbage collection at all, the heap profile reports
    49  // all known allocations. This exception helps mainly in programs running
    50  // without garbage collection enabled, usually for debugging purposes.
    51  //
    52  // The CPU profile is not available as a Profile. It has a special API,
    53  // the StartCPUProfile and StopCPUProfile functions, because it streams
    54  // output to a writer during profiling.
    55  //
    56  type Profile struct {
    57  	name  string
    58  	mu    sync.Mutex
    59  	m     map[interface{}][]uintptr
    60  	count func() int
    61  	write func(io.Writer, int) error
    62  }
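
        // A minimal usage sketch of a predefined profile (hypothetical client
        // code, assuming "os" and "runtime/pprof" are imported; the file name
        // "goroutine.prof" is only an illustration):
        //
        //	func dumpGoroutines() error {
        //		f, err := os.Create("goroutine.prof")
        //		if err != nil {
        //			return err
        //		}
        //		defer f.Close()
        //		return pprof.Lookup("goroutine").WriteTo(f, 0)
        //	}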
    63  
    64  // profiles records all registered profiles.
    65  var profiles struct {
    66  	mu sync.Mutex
    67  	m  map[string]*Profile
    68  }
    69  
    70  var goroutineProfile = &Profile{
    71  	name:  "goroutine",
    72  	count: countGoroutine,
    73  	write: writeGoroutine,
    74  }
    75  
    76  var threadcreateProfile = &Profile{
    77  	name:  "threadcreate",
    78  	count: countThreadCreate,
    79  	write: writeThreadCreate,
    80  }
    81  
    82  var heapProfile = &Profile{
    83  	name:  "heap",
    84  	count: countHeap,
    85  	write: writeHeap,
    86  }
    87  
    88  var blockProfile = &Profile{
    89  	name:  "block",
    90  	count: countBlock,
    91  	write: writeBlock,
    92  }
    93  
    94  func lockProfiles() {
    95  	profiles.mu.Lock()
    96  	if profiles.m == nil {
    97  		// Initial built-in profiles.
    98  		profiles.m = map[string]*Profile{
    99  			"goroutine":    goroutineProfile,
   100  			"threadcreate": threadcreateProfile,
   101  			"heap":         heapProfile,
   102  			"block":        blockProfile,
   103  		}
   104  	}
   105  }
   106  
   107  func unlockProfiles() {
   108  	profiles.mu.Unlock()
   109  }
   110  
   111  // NewProfile creates a new profile with the given name.
   112  // If a profile with that name already exists, NewProfile panics.
   113  // The convention is to use an 'import/path.' prefix to create
   114  // separate name spaces for each package.
   115  func NewProfile(name string) *Profile {
   116  	lockProfiles()
   117  	defer unlockProfiles()
   118  	if name == "" {
   119  		panic("pprof: NewProfile with empty name")
   120  	}
   121  	if profiles.m[name] != nil {
   122  		panic("pprof: NewProfile name already in use: " + name)
   123  	}
   124  	p := &Profile{
   125  		name: name,
   126  		m:    map[interface{}][]uintptr{},
   127  	}
   128  	profiles.m[name] = p
   129  	return p
   130  }
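
        // A sketch of the naming convention described above (the package path
        // "example.com/mypkg" and the profile itself are hypothetical):
        //
        //	var connProfile = pprof.NewProfile("example.com/mypkg.connections")
        //
        // The 'import/path.' prefix keeps the name from colliding with
        // profiles registered by other packages.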
   131  
   132  // Lookup returns the profile with the given name, or nil if no such profile exists.
   133  func Lookup(name string) *Profile {
   134  	lockProfiles()
   135  	defer unlockProfiles()
   136  	return profiles.m[name]
   137  }
   138  
   139  // Profiles returns a slice of all the known profiles, sorted by name.
   140  func Profiles() []*Profile {
   141  	lockProfiles()
   142  	defer unlockProfiles()
   143  
   144  	var all []*Profile
   145  	for _, p := range profiles.m {
   146  		all = append(all, p)
   147  	}
   148  
   149  	sort.Sort(byName(all))
   150  	return all
   151  }
   152  
   153  type byName []*Profile
   154  
   155  func (x byName) Len() int           { return len(x) }
   156  func (x byName) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
   157  func (x byName) Less(i, j int) bool { return x[i].name < x[j].name }
   158  
   159  // Name returns this profile's name, which can be passed to Lookup to reobtain the profile.
   160  func (p *Profile) Name() string {
   161  	return p.name
   162  }
   163  
   164  // Count returns the number of execution stacks currently in the profile.
   165  func (p *Profile) Count() int {
   166  	p.mu.Lock()
   167  	defer p.mu.Unlock()
   168  	if p.count != nil {
   169  		return p.count()
   170  	}
   171  	return len(p.m)
   172  }
   173  
   174  // Add adds the current execution stack to the profile, associated with value.
   175  // Add stores value in an internal map, so value must be suitable for use as
   176  // a map key and will not be garbage collected until the corresponding
   177  // call to Remove. Add panics if the profile already contains a stack for value.
   178  //
   179  // The skip parameter has the same meaning as runtime.Caller's skip
   180  // and controls where the stack trace begins. Passing skip=0 begins the
   181  // trace in the function calling Add. For example, given this
   182  // execution stack:
   183  //
   184  //	Add
   185  //	called from rpc.NewClient
   186  //	called from mypkg.Run
   187  //	called from main.main
   188  //
   189  // Passing skip=0 begins the stack trace at the call to Add inside rpc.NewClient.
   190  // Passing skip=1 begins the stack trace at the call to NewClient inside mypkg.Run.
   191  //
   192  func (p *Profile) Add(value interface{}, skip int) {
   193  	if p.name == "" {
   194  		panic("pprof: use of uninitialized Profile")
   195  	}
   196  	if p.write != nil {
   197  		panic("pprof: Add called on built-in Profile " + p.name)
   198  	}
   199  
   200  	stk := make([]uintptr, 32)
   201  	n := runtime.Callers(skip+1, stk[:])
   202  
   203  	p.mu.Lock()
   204  	defer p.mu.Unlock()
   205  	if p.m[value] != nil {
   206  		panic("pprof: Profile.Add of duplicate value")
   207  	}
   208  	p.m[value] = stk[:n]
   209  }
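
        // A sketch of tracking a resource with Add and Remove (hypothetical
        // client code: connProfile is the profile from the NewProfile sketch
        // above, and Conn, dial, and close are stand-ins for a real
        // connection type):
        //
        //	func Dial(addr string) (*Conn, error) {
        //		c, err := dial(addr)
        //		if err != nil {
        //			return nil, err
        //		}
        //		// skip=1 starts the recorded stack at Dial's caller
        //		// rather than at Dial itself.
        //		connProfile.Add(c, 1)
        //		return c, nil
        //	}
        //
        //	func (c *Conn) Close() error {
        //		connProfile.Remove(c)
        //		return c.close()
        //	}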
   210  
   211  // Remove removes the execution stack associated with value from the profile.
   212  // It is a no-op if the value is not in the profile.
   213  func (p *Profile) Remove(value interface{}) {
   214  	p.mu.Lock()
   215  	defer p.mu.Unlock()
   216  	delete(p.m, value)
   217  }
   218  
   219  // WriteTo writes a pprof-formatted snapshot of the profile to w.
   220  // If a write to w returns an error, WriteTo returns that error.
   221  // Otherwise, WriteTo returns nil.
   222  //
   223  // The debug parameter enables additional output.
   224  // Passing debug=0 prints only the hexadecimal addresses that pprof needs.
   225  // Passing debug=1 adds comments translating addresses to function names
   226  // and line numbers, so that a programmer can read the profile without tools.
   227  //
   228  // The predefined profiles may assign meaning to other debug values;
   229  // for example, when printing the "goroutine" profile, debug=2 means to
   230  // print the goroutine stacks in the same form that a Go program uses
   231  // when dying due to an unrecovered panic.
   232  func (p *Profile) WriteTo(w io.Writer, debug int) error {
   233  	if p.name == "" {
   234  		panic("pprof: use of zero Profile")
   235  	}
   236  	if p.write != nil {
   237  		return p.write(w, debug)
   238  	}
   239  
   240  	// Obtain consistent snapshot under lock; then process without lock.
   241  	var all [][]uintptr
   242  	p.mu.Lock()
   243  	for _, stk := range p.m {
   244  		all = append(all, stk)
   245  	}
   246  	p.mu.Unlock()
   247  
   248  	// Map order is non-deterministic; make output deterministic.
   249  	sort.Sort(stackProfile(all))
   250  
   251  	return printCountProfile(w, debug, p.name, stackProfile(all))
   252  }
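
        // A sketch of the debug levels described above (hypothetical client
        // code; os.Stderr is just one possible destination, and the returned
        // errors are ignored for brevity):
        //
        //	p := pprof.Lookup("heap")
        //	p.WriteTo(os.Stderr, 0) // raw addresses, for the pprof tool
        //	p.WriteTo(os.Stderr, 1) // adds function names and line numbers
        //
        //	// For the "goroutine" profile, debug=2 prints each goroutine's
        //	// stack in the same form as an unrecovered panic.
        //	pprof.Lookup("goroutine").WriteTo(os.Stderr, 2)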
   253  
   254  type stackProfile [][]uintptr
   255  
   256  func (x stackProfile) Len() int              { return len(x) }
   257  func (x stackProfile) Stack(i int) []uintptr { return x[i] }
   258  func (x stackProfile) Swap(i, j int)         { x[i], x[j] = x[j], x[i] }
   259  func (x stackProfile) Less(i, j int) bool {
   260  	t, u := x[i], x[j]
   261  	for k := 0; k < len(t) && k < len(u); k++ {
   262  		if t[k] != u[k] {
   263  			return t[k] < u[k]
   264  		}
   265  	}
   266  	return len(t) < len(u)
   267  }
   268  
   269  // A countProfile is a set of stack traces to be printed as counts
   270  // grouped by stack trace. There are multiple implementations:
   271  // all that matters is that we can find out how many traces there are
   272  // and obtain each trace in turn.
   273  type countProfile interface {
   274  	Len() int
   275  	Stack(i int) []uintptr
   276  }
   277  
   278  // printCountProfile prints a countProfile at the specified debug level.
   279  func printCountProfile(w io.Writer, debug int, name string, p countProfile) error {
   280  	b := bufio.NewWriter(w)
   281  	var tw *tabwriter.Writer
   282  	w = b
   283  	if debug > 0 {
   284  		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
   285  		w = tw
   286  	}
   287  
   288  	fmt.Fprintf(w, "%s profile: total %d\n", name, p.Len())
   289  
   290  	// Build count of each stack.
   291  	var buf bytes.Buffer
   292  	key := func(stk []uintptr) string {
   293  		buf.Reset()
   294  		fmt.Fprintf(&buf, "@")
   295  		for _, pc := range stk {
   296  			fmt.Fprintf(&buf, " %#x", pc)
   297  		}
   298  		return buf.String()
   299  	}
   300  	count := map[string]int{}
   301  	index := map[string]int{}
   302  	var keys []string
   303  	n := p.Len()
   304  	for i := 0; i < n; i++ {
   305  		k := key(p.Stack(i))
   306  		if count[k] == 0 {
   307  			index[k] = i
   308  			keys = append(keys, k)
   309  		}
   310  		count[k]++
   311  	}
   312  
   313  	sort.Sort(&keysByCount{keys, count})
   314  
   315  	for _, k := range keys {
   316  		fmt.Fprintf(w, "%d %s\n", count[k], k)
   317  		if debug > 0 {
   318  			printStackRecord(w, p.Stack(index[k]), false)
   319  		}
   320  	}
   321  
   322  	if tw != nil {
   323  		tw.Flush()
   324  	}
   325  	return b.Flush()
   326  }
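
        // For reference, debug=1 output from printCountProfile looks roughly
        // like the following (the addresses, offsets, and paths are invented
        // for illustration, and the tabwriter adjusts the spacing):
        //
        //	goroutine profile: total 2
        //	1 @ 0x42f2ca 0x42f38e 0x401234
        //	#	0x401234	main.worker+0x34	/home/user/app/main.go:17
        //
        //	1 @ 0x42f2ca 0x42f38e 0x405678
        //	#	0x405678	main.main+0x78	/home/user/app/main.go:9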
   327  
   328  // keysByCount sorts keys with higher counts first, breaking ties by key string order.
   329  type keysByCount struct {
   330  	keys  []string
   331  	count map[string]int
   332  }
   333  
   334  func (x *keysByCount) Len() int      { return len(x.keys) }
   335  func (x *keysByCount) Swap(i, j int) { x.keys[i], x.keys[j] = x.keys[j], x.keys[i] }
   336  func (x *keysByCount) Less(i, j int) bool {
   337  	ki, kj := x.keys[i], x.keys[j]
   338  	ci, cj := x.count[ki], x.count[kj]
   339  	if ci != cj {
   340  		return ci > cj
   341  	}
   342  	return ki < kj
   343  }
   344  
   345  // printStackRecord prints the function + source line information
   346  // for a single stack trace.
   347  func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
   348  	show := allFrames
   349  	frames := runtime.CallersFrames(stk)
   350  	for {
   351  		frame, more := frames.Next()
   352  		name := frame.Function
   353  		if name == "" {
   354  			show = true
   355  			fmt.Fprintf(w, "#\t%#x\n", frame.PC)
   356  		} else {
   357  			// Hide runtime.goexit and any runtime functions at the beginning.
   358  			// This is useful mainly for allocation traces.
   359  			if name == "runtime.goexit" || !show && strings.HasPrefix(name, "runtime.") {
   360  				continue
   361  			}
   362  			show = true
   363  			fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", frame.PC, name, frame.PC-frame.Entry, frame.File, frame.Line)
   364  		}
   365  		if !more {
   366  			break
   367  		}
   368  	}
   369  	if !show {
   370  		// We didn't print anything; do it again,
   371  		// and this time include runtime functions.
   372  		printStackRecord(w, stk, true)
   373  		return
   374  	}
   375  	fmt.Fprintf(w, "\n")
   376  }
   377  
   378  // Interface to system profiles.
   379  
   380  type byInUseBytes []runtime.MemProfileRecord
   381  
   382  func (x byInUseBytes) Len() int           { return len(x) }
   383  func (x byInUseBytes) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
   384  func (x byInUseBytes) Less(i, j int) bool { return x[i].InUseBytes() > x[j].InUseBytes() }
   385  
   386  // WriteHeapProfile is shorthand for Lookup("heap").WriteTo(w, 0).
   387  // It is preserved for backwards compatibility.
   388  func WriteHeapProfile(w io.Writer) error {
   389  	return writeHeap(w, 0)
   390  }
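
        // A sketch of writing a heap profile to a file (hypothetical client
        // code, assuming "log", "os", and "runtime" are imported; "heap.prof"
        // is only an illustration). The runtime.GC call is optional: it forces
        // a collection so the profile reflects the current heap rather than
        // the last completed collection described in the Profile comment
        // above.
        //
        //	f, err := os.Create("heap.prof")
        //	if err != nil {
        //		log.Fatal(err)
        //	}
        //	runtime.GC()
        //	if err := pprof.WriteHeapProfile(f); err != nil {
        //		log.Fatal(err)
        //	}
        //	f.Close()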
   391  
   392  // countHeap returns the number of records in the heap profile.
   393  func countHeap() int {
   394  	n, _ := runtime.MemProfile(nil, true)
   395  	return n
   396  }
   397  
   398  // writeHeap writes the current runtime heap profile to w.
   399  func writeHeap(w io.Writer, debug int) error {
   400  	// Find out how many records there are (MemProfile(nil, true)),
   401  	// allocate that many records, and get the data.
   402  	// There's a race—more records might be added between
   403  	// the two calls—so allocate a few extra records for safety
   404  	// and also try again if we're very unlucky.
   405  	// The loop should only execute one iteration in the common case.
   406  	var p []runtime.MemProfileRecord
   407  	n, ok := runtime.MemProfile(nil, true)
   408  	for {
   409  		// Allocate room for a slightly bigger profile,
   410  		// in case a few more entries have been added
   411  		// since the call to MemProfile.
   412  		p = make([]runtime.MemProfileRecord, n+50)
   413  		n, ok = runtime.MemProfile(p, true)
   414  		if ok {
   415  			p = p[0:n]
   416  			break
   417  		}
   418  		// Profile grew; try again.
   419  	}
   420  
   421  	sort.Sort(byInUseBytes(p))
   422  
   423  	b := bufio.NewWriter(w)
   424  	var tw *tabwriter.Writer
   425  	w = b
   426  	if debug > 0 {
   427  		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
   428  		w = tw
   429  	}
   430  
   431  	var total runtime.MemProfileRecord
   432  	for i := range p {
   433  		r := &p[i]
   434  		total.AllocBytes += r.AllocBytes
   435  		total.AllocObjects += r.AllocObjects
   436  		total.FreeBytes += r.FreeBytes
   437  		total.FreeObjects += r.FreeObjects
   438  	}
   439  
   440  	// Technically the rate is MemProfileRate not 2*MemProfileRate,
   441  	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
   442  	// so that's what pprof has come to expect.
   443  	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
   444  		total.InUseObjects(), total.InUseBytes(),
   445  		total.AllocObjects, total.AllocBytes,
   446  		2*runtime.MemProfileRate)
   447  
   448  	for i := range p {
   449  		r := &p[i]
   450  		fmt.Fprintf(w, "%d: %d [%d: %d] @",
   451  			r.InUseObjects(), r.InUseBytes(),
   452  			r.AllocObjects, r.AllocBytes)
   453  		for _, pc := range r.Stack() {
   454  			fmt.Fprintf(w, " %#x", pc)
   455  		}
   456  		fmt.Fprintf(w, "\n")
   457  		if debug > 0 {
   458  			printStackRecord(w, r.Stack(), false)
   459  		}
   460  	}
   461  
   462  	// Print memstats information too.
   463  	// Pprof will ignore it, but it is useful for people reading the profile.
   464  	s := new(runtime.MemStats)
   465  	runtime.ReadMemStats(s)
   466  	fmt.Fprintf(w, "\n# runtime.MemStats\n")
   467  	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
   468  	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
   469  	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
   470  	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
   471  	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
   472  	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)
   473  
   474  	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
   475  	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
   476  	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
   477  	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
   478  	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
   479  	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)
   480  
   481  	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
   482  	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
   483  	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
   484  	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
   485  
   486  	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
   487  	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
   488  	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
   489  	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
   490  
   491  	if tw != nil {
   492  		tw.Flush()
   493  	}
   494  	return b.Flush()
   495  }
   496  
   497  // countThreadCreate returns the size of the current ThreadCreateProfile.
   498  func countThreadCreate() int {
   499  	n, _ := runtime.ThreadCreateProfile(nil)
   500  	return n
   501  }
   502  
   503  // writeThreadCreate writes the current runtime ThreadCreateProfile to w.
   504  func writeThreadCreate(w io.Writer, debug int) error {
   505  	return writeRuntimeProfile(w, debug, "threadcreate", runtime.ThreadCreateProfile)
   506  }
   507  
   508  // countGoroutine returns the number of goroutines.
   509  func countGoroutine() int {
   510  	return runtime.NumGoroutine()
   511  }
   512  
   513  // writeGoroutine writes the current runtime GoroutineProfile to w.
   514  func writeGoroutine(w io.Writer, debug int) error {
   515  	if debug >= 2 {
   516  		return writeGoroutineStacks(w)
   517  	}
   518  	return writeRuntimeProfile(w, debug, "goroutine", runtime.GoroutineProfile)
   519  }
   520  
   521  func writeGoroutineStacks(w io.Writer) error {
   522  	// We don't know how big the buffer needs to be to collect
   523  	// all the goroutines. Start with 1 MB and try a few times, doubling each time.
   524  	// Give up and use a truncated trace if 64 MB is not enough.
   525  	buf := make([]byte, 1<<20)
   526  	for i := 0; ; i++ {
   527  		n := runtime.Stack(buf, true)
   528  		if n < len(buf) {
   529  			buf = buf[:n]
   530  			break
   531  		}
   532  		if len(buf) >= 64<<20 {
   533  			// Filled 64 MB - stop there.
   534  			break
   535  		}
   536  		buf = make([]byte, 2*len(buf))
   537  	}
   538  	_, err := w.Write(buf)
   539  	return err
   540  }
   541  
   542  func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]runtime.StackRecord) (int, bool)) error {
   543  	// Find out how many records there are (fetch(nil)),
   544  	// allocate that many records, and get the data.
   545  	// There's a race—more records might be added between
   546  	// the two calls—so allocate a few extra records for safety
   547  	// and also try again if we're very unlucky.
   548  	// The loop should only execute one iteration in the common case.
   549  	var p []runtime.StackRecord
   550  	n, ok := fetch(nil)
   551  	for {
   552  		// Allocate room for a slightly bigger profile,
   553  		// in case a few more entries have been added
   554  		// since the previous call to fetch.
   555  		p = make([]runtime.StackRecord, n+10)
   556  		n, ok = fetch(p)
   557  		if ok {
   558  			p = p[0:n]
   559  			break
   560  		}
   561  		// Profile grew; try again.
   562  	}
   563  
   564  	return printCountProfile(w, debug, name, runtimeProfile(p))
   565  }
   566  
   567  type runtimeProfile []runtime.StackRecord
   568  
   569  func (p runtimeProfile) Len() int              { return len(p) }
   570  func (p runtimeProfile) Stack(i int) []uintptr { return p[i].Stack() }
   571  
   572  var cpu struct {
   573  	sync.Mutex
   574  	profiling bool
   575  	done      chan bool
   576  }
   577  
   578  // StartCPUProfile enables CPU profiling for the current process.
   579  // While profiling, the profile will be buffered and written to w.
   580  // StartCPUProfile returns an error if profiling is already enabled.
   581  //
   582  // On Unix-like systems, StartCPUProfile does not work by default for
   583  // Go code built with -buildmode=c-archive or -buildmode=c-shared.
   584  // StartCPUProfile relies on the SIGPROF signal, but that signal will
   585  // be delivered to the main program's SIGPROF signal handler (if any),
   586  // not to the one used by Go. To make it work, call os/signal.Notify
   587  // for syscall.SIGPROF, but note that doing so may break any profiling
   588  // being done by the main program.
   589  func StartCPUProfile(w io.Writer) error {
   590  	// The runtime routines allow a variable profiling rate,
   591  	// but in practice operating systems cannot trigger signals
   592  	// at more than about 500 Hz, and our processing of the
   593  	// signal is not cheap (mostly getting the stack trace).
   594  	// 100 Hz is a reasonable choice: it is frequent enough to
   595  	// produce useful data, rare enough not to bog down the
   596  	// system, and a nice round number to make it easy to
   597  	// convert sample counts to seconds. Instead of requiring
   598  	// each client to specify the frequency, we hard code it.
   599  	const hz = 100
   600  
   601  	cpu.Lock()
   602  	defer cpu.Unlock()
   603  	if cpu.done == nil {
   604  		cpu.done = make(chan bool)
   605  	}
   606  	// Double-check under the lock that profiling is not already enabled.
   607  	if cpu.profiling {
   608  		return fmt.Errorf("cpu profiling already in use")
   609  	}
   610  	cpu.profiling = true
   611  	runtime.SetCPUProfileRate(hz)
   612  	go profileWriter(w)
   613  	return nil
   614  }
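
        // A sketch of a typical CPU profiling session (hypothetical client
        // code, assuming "log" and "os" are imported; "cpu.prof" is only an
        // illustration):
        //
        //	f, err := os.Create("cpu.prof")
        //	if err != nil {
        //		log.Fatal(err)
        //	}
        //	if err := pprof.StartCPUProfile(f); err != nil {
        //		log.Fatal(err)
        //	}
        //	defer pprof.StopCPUProfile()
        //
        //	// ... run the workload to be profiled ...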
   615  
   616  func profileWriter(w io.Writer) {
   617  	for {
   618  		data := runtime.CPUProfile()
   619  		if data == nil {
   620  			break
   621  		}
   622  		w.Write(data)
   623  	}
   624  
   625  	// We are emitting the legacy profiling format, which permits
   626  	// a memory map following the CPU samples. The memory map is
   627  	// simply a copy of the GNU/Linux /proc/self/maps file. The
   628  	// profiler uses the memory map to map PC values in shared
   629  	// libraries to a shared library in the filesystem, in order
   630  	// to report the correct function and, if the shared library
   631  	// has debug info, file/line. This is particularly useful for
   632  	// PIE (position independent executables) as on ELF systems a
   633  	// PIE is simply an executable shared library.
   634  	//
   635  	// Because the profiling format expects the memory map in
   636  	// GNU/Linux format, we only do this on GNU/Linux for now. To
   637  	// add support for profiling PIE on other ELF-based systems,
   638  	// it may be necessary to map the system-specific mapping
   639  	// information to the GNU/Linux format. For a reasonably
   640  	// portable C++ version, see the FillProcSelfMaps function in
   641  	// https://github.com/gperftools/gperftools/blob/master/src/base/sysinfo.cc
   642  	//
   643  	// The code that parses this mapping for the pprof tool is
   644  	// ParseMemoryMap in cmd/internal/pprof/legacy_profile.go, but
   645  	// don't change that code, as similar code exists in other
   646  	// (non-Go) pprof readers. Change this code so that that code works.
   647  	//
   648  	// We ignore errors reading or copying the memory map; the
   649  	// profile is likely usable without it, and we have no good way
   650  	// to report errors.
   651  	if runtime.GOOS == "linux" {
   652  		f, err := os.Open("/proc/self/maps")
   653  		if err == nil {
   654  			io.WriteString(w, "\nMAPPED_LIBRARIES:\n")
   655  			io.Copy(w, f)
   656  			f.Close()
   657  		}
   658  	}
   659  
   660  	cpu.done <- true
   661  }
   662  
   663  // StopCPUProfile stops the current CPU profile, if any.
   664  // StopCPUProfile only returns after all the writes for the
   665  // profile have completed.
   666  func StopCPUProfile() {
   667  	cpu.Lock()
   668  	defer cpu.Unlock()
   669  
   670  	if !cpu.profiling {
   671  		return
   672  	}
   673  	cpu.profiling = false
   674  	runtime.SetCPUProfileRate(0)
   675  	<-cpu.done
   676  }
   677  
   678  type byCycles []runtime.BlockProfileRecord
   679  
   680  func (x byCycles) Len() int           { return len(x) }
   681  func (x byCycles) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
   682  func (x byCycles) Less(i, j int) bool { return x[i].Cycles > x[j].Cycles }
   683  
   684  // countBlock returns the number of records in the blocking profile.
   685  func countBlock() int {
   686  	n, _ := runtime.BlockProfile(nil)
   687  	return n
   688  }
   689  
   690  // writeBlock writes the current blocking profile to w.
   691  func writeBlock(w io.Writer, debug int) error {
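        	// Collect the block profile with the same two-call pattern used by
        	// writeHeap and writeRuntimeProfile: ask for the record count,
        	// allocate a slightly larger slice, and retry if the profile grew
        	// in the meantime.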
   692  	var p []runtime.BlockProfileRecord
   693  	n, ok := runtime.BlockProfile(nil)
   694  	for {
   695  		p = make([]runtime.BlockProfileRecord, n+50)
   696  		n, ok = runtime.BlockProfile(p)
   697  		if ok {
   698  			p = p[:n]
   699  			break
   700  		}
   701  	}
   702  
   703  	sort.Sort(byCycles(p))
   704  
   705  	b := bufio.NewWriter(w)
   706  	var tw *tabwriter.Writer
   707  	w = b
   708  	if debug > 0 {
   709  		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
   710  		w = tw
   711  	}
   712  
   713  	fmt.Fprintf(w, "--- contention:\n")
   714  	fmt.Fprintf(w, "cycles/second=%v\n", runtime_cyclesPerSecond())
   715  	for i := range p {
   716  		r := &p[i]
   717  		fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
   718  		for _, pc := range r.Stack() {
   719  			fmt.Fprintf(w, " %#x", pc)
   720  		}
   721  		fmt.Fprint(w, "\n")
   722  		if debug > 0 {
   723  			printStackRecord(w, r.Stack(), true)
   724  		}
   725  	}
   726  
   727  	if tw != nil {
   728  		tw.Flush()
   729  	}
   730  	return b.Flush()
   731  }
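
        // A sketch of collecting the "block" profile (hypothetical client
        // code, assuming "os", "runtime", and "runtime/pprof" are imported).
        // The profile stays empty unless block profiling is enabled with
        // runtime.SetBlockProfileRate; a rate of 1 records every blocking
        // event and is chosen here only for illustration:
        //
        //	runtime.SetBlockProfileRate(1)
        //	// ... run the workload ...
        //	pprof.Lookup("block").WriteTo(os.Stderr, 1)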
   732  
   733  func runtime_cyclesPerSecond() int64