github.com/guyezi/gofrontend@v0.0.0-20200228202240-7a62a49e62c0/libgo/go/runtime/pprof/proto.go (about)

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package pprof
     6  
     7  import (
     8  	"bytes"
     9  	"compress/gzip"
    10  	"fmt"
    11  	internalcpu "internal/cpu"
    12  	"io"
    13  	"io/ioutil"
    14  	"runtime"
    15  	"strconv"
    16  	"time"
    17  	"unsafe"
    18  )
    19  
// lostProfileEvent is the function to which lost profiling
// events are attributed.
// (The name shows up in the pprof graphs.)
// The body is never meant to run; it exists only so the function has a
// real PC that addCPUData can substitute for overflow records via funcPC.
// The self-call keeps the body non-empty.
func lostProfileEvent() { lostProfileEvent() }
    24  
    25  // funcPC returns the PC for the func value f.
    26  func funcPC(f interface{}) uintptr {
    27  	type iface struct {
    28  		tab  unsafe.Pointer
    29  		data unsafe.Pointer
    30  	}
    31  	i := (*iface)(unsafe.Pointer(&f))
    32  	r := *(*uintptr)(i.data)
    33  	if internalcpu.FunctionDescriptors {
    34  		// With PPC64 ELF ABI v1 function descriptors the
    35  		// function address is a pointer to a struct whose
    36  		// first field is the actual PC.
    37  		r = *(*uintptr)(unsafe.Pointer(r))
    38  	}
    39  	return r
    40  }
    41  
// A profileBuilder writes a profile incrementally from a
// stream of profile samples delivered by the runtime.
type profileBuilder struct {
	start      time.Time // when profiling began
	end        time.Time // when profiling ended; set by build
	havePeriod bool      // whether the leading period record has been parsed
	period     int64     // sampling period in nanoseconds
	m          profMap   // samples deduplicated by (stack, tag)

	// encoding state
	w         io.Writer      // final destination for the gzipped profile
	zw        *gzip.Writer   // gzip stream wrapping w
	pb        protobuf       // pending protobuf-encoded bytes
	strings   []string       // string table; index 0 is always ""
	stringMap map[string]int // string -> index in strings
	locs      map[uintptr]locInfo // list of locInfo starting with the given PC.
	funcs     map[string]int      // Package path-qualified function name to Function.ID
	mem       []memMap // executable mappings, in /proc/self/maps order
	deck      pcDeck   // scratch state for merging inlined frames into one Location
}
    62  
// memMap records one executable memory mapping (one line of
// /proc/self/maps) for later emission as a profile Mapping message.
type memMap struct {
	// initialized as reading mapping
	start         uintptr // first address of the mapping
	end           uintptr // one past the last address
	offset        uint64  // file offset at which the mapping starts
	file, buildID string  // backing file path and its ELF build ID ("" if unknown)

	funcs symbolizeFlag // accumulated symbolization results for locations in this mapping
	fake  bool // map entry was faked; /proc/self/maps wasn't available
}
    73  
// symbolizeFlag keeps track of symbolization result.
//   0                  : no symbol lookup was performed
//   1<<0 (lookupTried) : symbol lookup was performed
//   1<<1 (lookupFailed): symbol lookup was performed but failed
// A mapping gets HasFunctions set only when its accumulated flags equal
// exactly lookupTried (tried and never failed); see build.
type symbolizeFlag uint8

const (
	lookupTried  symbolizeFlag = 1 << iota
	lookupFailed symbolizeFlag = 1 << iota
)
    84  
// Protobuf field numbers for the messages in the pprof profile format.
// They mirror the schema in github.com/google/pprof proto/profile.proto;
// the trailing comment on each tag gives the field's wire type.
const (
	// message Profile
	tagProfile_SampleType        = 1  // repeated ValueType
	tagProfile_Sample            = 2  // repeated Sample
	tagProfile_Mapping           = 3  // repeated Mapping
	tagProfile_Location          = 4  // repeated Location
	tagProfile_Function          = 5  // repeated Function
	tagProfile_StringTable       = 6  // repeated string
	tagProfile_DropFrames        = 7  // int64 (string table index)
	tagProfile_KeepFrames        = 8  // int64 (string table index)
	tagProfile_TimeNanos         = 9  // int64
	tagProfile_DurationNanos     = 10 // int64
	tagProfile_PeriodType        = 11 // ValueType (really optional string???)
	tagProfile_Period            = 12 // int64
	tagProfile_Comment           = 13 // repeated int64
	tagProfile_DefaultSampleType = 14 // int64

	// message ValueType
	tagValueType_Type = 1 // int64 (string table index)
	tagValueType_Unit = 2 // int64 (string table index)

	// message Sample
	tagSample_Location = 1 // repeated uint64
	tagSample_Value    = 2 // repeated int64
	tagSample_Label    = 3 // repeated Label

	// message Label
	tagLabel_Key = 1 // int64 (string table index)
	tagLabel_Str = 2 // int64 (string table index)
	tagLabel_Num = 3 // int64

	// message Mapping
	tagMapping_ID              = 1  // uint64
	tagMapping_Start           = 2  // uint64
	tagMapping_Limit           = 3  // uint64
	tagMapping_Offset          = 4  // uint64
	tagMapping_Filename        = 5  // int64 (string table index)
	tagMapping_BuildID         = 6  // int64 (string table index)
	tagMapping_HasFunctions    = 7  // bool
	tagMapping_HasFilenames    = 8  // bool
	tagMapping_HasLineNumbers  = 9  // bool
	tagMapping_HasInlineFrames = 10 // bool

	// message Location
	tagLocation_ID        = 1 // uint64
	tagLocation_MappingID = 2 // uint64
	tagLocation_Address   = 3 // uint64
	tagLocation_Line      = 4 // repeated Line

	// message Line
	tagLine_FunctionID = 1 // uint64
	tagLine_Line       = 2 // int64

	// message Function
	tagFunction_ID         = 1 // uint64
	tagFunction_Name       = 2 // int64 (string table index)
	tagFunction_SystemName = 3 // int64 (string table index)
	tagFunction_Filename   = 4 // int64 (string table index)
	tagFunction_StartLine  = 5 // int64
)
   145  
   146  // stringIndex adds s to the string table if not already present
   147  // and returns the index of s in the string table.
   148  func (b *profileBuilder) stringIndex(s string) int64 {
   149  	id, ok := b.stringMap[s]
   150  	if !ok {
   151  		id = len(b.strings)
   152  		b.strings = append(b.strings, s)
   153  		b.stringMap[s] = id
   154  	}
   155  	return int64(id)
   156  }
   157  
   158  func (b *profileBuilder) flush() {
   159  	const dataFlush = 4096
   160  	if b.pb.nest == 0 && len(b.pb.data) > dataFlush {
   161  		b.zw.Write(b.pb.data)
   162  		b.pb.data = b.pb.data[:0]
   163  	}
   164  }
   165  
   166  // pbValueType encodes a ValueType message to b.pb.
   167  func (b *profileBuilder) pbValueType(tag int, typ, unit string) {
   168  	start := b.pb.startMessage()
   169  	b.pb.int64(tagValueType_Type, b.stringIndex(typ))
   170  	b.pb.int64(tagValueType_Unit, b.stringIndex(unit))
   171  	b.pb.endMessage(tag, start)
   172  }
   173  
   174  // pbSample encodes a Sample message to b.pb.
   175  func (b *profileBuilder) pbSample(values []int64, locs []uint64, labels func()) {
   176  	start := b.pb.startMessage()
   177  	b.pb.int64s(tagSample_Value, values)
   178  	b.pb.uint64s(tagSample_Location, locs)
   179  	if labels != nil {
   180  		labels()
   181  	}
   182  	b.pb.endMessage(tagProfile_Sample, start)
   183  	b.flush()
   184  }
   185  
   186  // pbLabel encodes a Label message to b.pb.
   187  func (b *profileBuilder) pbLabel(tag int, key, str string, num int64) {
   188  	start := b.pb.startMessage()
   189  	b.pb.int64Opt(tagLabel_Key, b.stringIndex(key))
   190  	b.pb.int64Opt(tagLabel_Str, b.stringIndex(str))
   191  	b.pb.int64Opt(tagLabel_Num, num)
   192  	b.pb.endMessage(tag, start)
   193  }
   194  
   195  // pbLine encodes a Line message to b.pb.
   196  func (b *profileBuilder) pbLine(tag int, funcID uint64, line int64) {
   197  	start := b.pb.startMessage()
   198  	b.pb.uint64Opt(tagLine_FunctionID, funcID)
   199  	b.pb.int64Opt(tagLine_Line, line)
   200  	b.pb.endMessage(tag, start)
   201  }
   202  
// pbMapping encodes a Mapping message to b.pb.
// id is the 1-based Mapping.ID; base/limit/offset describe the mapped
// address range; hasFuncs reports whether all samples attributed to this
// mapping were successfully symbolized.
func (b *profileBuilder) pbMapping(tag int, id, base, limit, offset uint64, file, buildID string, hasFuncs bool) {
	start := b.pb.startMessage()
	b.pb.uint64Opt(tagMapping_ID, id)
	b.pb.uint64Opt(tagMapping_Start, base)
	b.pb.uint64Opt(tagMapping_Limit, limit)
	b.pb.uint64Opt(tagMapping_Offset, offset)
	b.pb.int64Opt(tagMapping_Filename, b.stringIndex(file))
	b.pb.int64Opt(tagMapping_BuildID, b.stringIndex(buildID))
	// TODO: we set HasFunctions if all symbols from samples were symbolized (hasFuncs).
	// Decide what to do about HasInlineFrames and HasLineNumbers.
	// Also, another approach to handle the mapping entry with
	// incomplete symbolization results is to duplicate the mapping
	// entry (but with different Has* fields values) and use
	// different entries for symbolized locations and unsymbolized locations.
	if hasFuncs {
		b.pb.bool(tagMapping_HasFunctions, true)
	}
	b.pb.endMessage(tag, start)
}
   223  
   224  func allFrames(addr uintptr) ([]runtime.Frame, symbolizeFlag) {
   225  	// Expand this one address using CallersFrames so we can cache
   226  	// each expansion. In general, CallersFrames takes a whole
   227  	// stack, but in this case we know there will be no skips in
   228  	// the stack and we have return PCs anyway.
   229  	frames := runtime.CallersFrames([]uintptr{addr})
   230  	frame, more := frames.Next()
   231  	if frame.Function == "runtime.goexit" || frame.Function == "runtime.kickoff" {
   232  		// Short-circuit if we see runtime.goexit so the loop
   233  		// below doesn't allocate a useless empty location.
   234  		return nil, 0
   235  	}
   236  
   237  	symbolizeResult := lookupTried
   238  	if frame.PC == 0 || frame.Function == "" || frame.File == "" || frame.Line == 0 {
   239  		symbolizeResult |= lookupFailed
   240  	}
   241  
   242  	if frame.PC == 0 {
   243  		// If we failed to resolve the frame, at least make up
   244  		// a reasonable call PC. This mostly happens in tests.
   245  		frame.PC = addr - 1
   246  	}
   247  	ret := []runtime.Frame{frame}
   248  	for frame.Function != "runtime.goexit" && frame.Function != "runtime.kickoff" && more == true {
   249  		frame, more = frames.Next()
   250  		ret = append(ret, frame)
   251  	}
   252  	return ret, symbolizeResult
   253  }
   254  
// locInfo is a cached Location: the ID it was emitted under and the
// exact pc sequence it covers. appendLocsForStack uses it to skip
// already-emitted prefixes of later stacks.
type locInfo struct {
	// location id assigned by the profileBuilder
	id uint64

	// sequence of PCs, including the fake PCs returned by the traceback
	// to represent inlined functions
	// https://github.com/golang/go/blob/d6f2f833c93a41ec1c68e49804b8387a06b131c5/src/runtime/traceback.go#L347-L368
	pcs []uintptr
}
   264  
   265  // newProfileBuilder returns a new profileBuilder.
   266  // CPU profiling data obtained from the runtime can be added
   267  // by calling b.addCPUData, and then the eventual profile
   268  // can be obtained by calling b.finish.
   269  func newProfileBuilder(w io.Writer) *profileBuilder {
   270  	zw, _ := gzip.NewWriterLevel(w, gzip.BestSpeed)
   271  	b := &profileBuilder{
   272  		w:         w,
   273  		zw:        zw,
   274  		start:     time.Now(),
   275  		strings:   []string{""},
   276  		stringMap: map[string]int{"": 0},
   277  		locs:      map[uintptr]locInfo{},
   278  		funcs:     map[string]int{},
   279  	}
   280  	b.readMapping()
   281  	return b
   282  }
   283  
// addCPUData adds the CPU profiling data to the profile.
// The data must be a whole number of records,
// as delivered by the runtime.
// tags, if non-nil, carries one label-set pointer per sample record and
// is consumed in lockstep with data.
// It returns an error if the record stream is truncated or malformed.
func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error {
	if !b.havePeriod {
		// first record is period
		if len(data) < 3 {
			return fmt.Errorf("truncated profile")
		}
		if data[0] != 3 || data[2] == 0 {
			return fmt.Errorf("malformed profile")
		}
		// data[2] is sampling rate in Hz. Convert to sampling
		// period in nanoseconds.
		b.period = 1e9 / int64(data[2])
		b.havePeriod = true
		data = data[3:]
	}

	// Parse CPU samples from the profile.
	// Each sample is 3+n uint64s:
	//	data[0] = 3+n
	//	data[1] = time stamp (ignored)
	//	data[2] = count
	//	data[3:3+n] = stack
	// If the count is 0 and the stack has length 1,
	// that's an overflow record inserted by the runtime
	// to indicate that stack[0] samples were lost.
	// Otherwise the count is usually 1,
	// but in a few special cases like lost non-Go samples
	// there can be larger counts.
	// Because many samples with the same stack arrive,
	// we want to deduplicate immediately, which we do
	// using the b.m profMap.
	for len(data) > 0 {
		// data[0] is the record length; it must fit in what remains.
		if len(data) < 3 || data[0] > uint64(len(data)) {
			return fmt.Errorf("truncated profile")
		}
		if data[0] < 3 || tags != nil && len(tags) < 1 {
			return fmt.Errorf("malformed profile")
		}
		count := data[2]
		stk := data[3:data[0]]
		data = data[data[0]:]
		var tag unsafe.Pointer
		if tags != nil {
			tag = tags[0]
			tags = tags[1:]
		}

		if count == 0 && len(stk) == 1 {
			// overflow record
			count = uint64(stk[0])
			// Attribute the lost samples to the sentinel
			// lostProfileEvent function so they show up in graphs.
			stk = []uint64{
				uint64(funcPC(lostProfileEvent)),
			}
		}
		b.m.lookup(stk, tag).count += int64(count)
	}
	return nil
}
   345  
// build completes the profile, writing it (gzip-compressed) to the
// underlying writer: first the header fields (time/period), then one
// Sample per deduplicated stack, the Mappings, and finally the string
// table, before closing the gzip stream.
func (b *profileBuilder) build() {
	b.end = time.Now()

	b.pb.int64Opt(tagProfile_TimeNanos, b.start.UnixNano())
	if b.havePeriod { // must be CPU profile
		b.pbValueType(tagProfile_SampleType, "samples", "count")
		b.pbValueType(tagProfile_SampleType, "cpu", "nanoseconds")
		b.pb.int64Opt(tagProfile_DurationNanos, b.end.Sub(b.start).Nanoseconds())
		b.pbValueType(tagProfile_PeriodType, "cpu", "nanoseconds")
		b.pb.int64Opt(tagProfile_Period, b.period)
	}

	// values is reused across samples: [sample count, cpu nanoseconds].
	values := []int64{0, 0}
	var locs []uint64

	for e := b.m.all; e != nil; e = e.nextAll {
		values[0] = e.count
		values[1] = e.count * b.period // count * period = cpu time in ns

		// If the sample carried pprof labels, emit them inside the
		// Sample message via this callback.
		var labels func()
		if e.tag != nil {
			labels = func() {
				for k, v := range *(*labelMap)(e.tag) {
					b.pbLabel(tagSample_Label, k, v, 0)
				}
			}
		}

		locs = b.appendLocsForStack(locs[:0], e.stk)

		b.pbSample(values, locs, labels)
	}

	for i, m := range b.mem {
		hasFunctions := m.funcs == lookupTried // lookupTried but not lookupFailed
		b.pbMapping(tagProfile_Mapping, uint64(i+1), uint64(m.start), uint64(m.end), m.offset, m.file, m.buildID, hasFunctions)
	}

	// TODO: Anything for tagProfile_DropFrames?
	// TODO: Anything for tagProfile_KeepFrames?

	b.pb.strings(tagProfile_StringTable, b.strings)
	b.zw.Write(b.pb.data)
	b.zw.Close()
}
   392  
// appendLocsForStack appends the location IDs for the given stack trace to the given
// location ID slice, locs. The addresses in the stack are return PCs or 1 + the PC of
// an inline marker as the runtime traceback function returns.
//
// It may emit to b.pb, so there must be no message encoding in progress.
//
// It works through b.deck: consecutive PCs that look like one inlined call
// chain are accumulated there and emitted as a single Location by
// emitLocation, while previously seen PC prefixes are served from b.locs.
func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) {
	b.deck.reset()
	for len(stk) > 0 {
		addr := stk[0]
		if l, ok := b.locs[addr]; ok {
			// first record the location if there is any pending accumulated info.
			if id := b.emitLocation(); id > 0 {
				locs = append(locs, id)
			}

			// then, record the cached location.
			locs = append(locs, l.id)

			// The stk may be truncated due to the stack depth limit
			// (e.g. See maxStack and maxCPUProfStack in runtime) or
			// bugs in runtime. Avoid the crash in either case.
			// TODO(hyangah): The correct fix may require using the exact
			// pcs as the key for b.locs cache management instead of just
			// relying on the very first pc. We are late in the go1.14 dev
			// cycle, so this is a workaround with little code change.
			if len(l.pcs) > len(stk) {
				stk = nil
				// TODO(hyangah): would be nice if we can enable
				// debug print out on demand and report the problematic
				// cached location entry and stack traces. Do we already
				// have such facility to utilize (e.g. GODEBUG)?
			} else {
				stk = stk[len(l.pcs):] // skip the matching pcs.
			}
			continue
		}

		frames, symbolizeResult := allFrames(addr)
		if len(frames) == 0 { // runtime.goexit.
			// Flush whatever the deck holds; goexit itself gets no location.
			if id := b.emitLocation(); id > 0 {
				locs = append(locs, id)
			}
			stk = stk[1:]
			continue
		}

		if added := b.deck.tryAdd(addr, frames, symbolizeResult); added {
			stk = stk[1:]
			continue
		}
		// add failed because this addr is not inlined with
		// the existing PCs in the deck. Flush the deck and retry to
		// handle this pc.
		if id := b.emitLocation(); id > 0 {
			locs = append(locs, id)
		}

		// check cache again - previous emitLocation added a new entry
		if l, ok := b.locs[addr]; ok {
			locs = append(locs, l.id)
			stk = stk[len(l.pcs):] // skip the matching pcs.
		} else {
			b.deck.tryAdd(addr, frames, symbolizeResult) // must succeed.
			stk = stk[1:]
		}
	}
	if id := b.emitLocation(); id > 0 { // emit remaining location.
		locs = append(locs, id)
	}
	return locs
}
   464  
// pcDeck is a helper to detect a sequence of inlined functions from
// a stack trace returned by the runtime.
//
// The stack traces returned by runtime's traceback functions are fully
// expanded (at least for Go functions) and include the fake pcs representing
// inlined functions. The profile proto expects the inlined functions to be
// encoded in one Location message.
// https://github.com/google/pprof/blob/5e965273ee43930341d897407202dd5e10e952cb/proto/profile.proto#L177-L184
//
// Runtime does not directly expose whether a frame is for an inlined function
// and looking up debug info is not ideal, so we use a heuristic to filter
// the fake pcs and restore the inlined and entry functions. Inlined functions
// have the following properties:
//   Frame's Func is nil (note: also true for non-Go functions), and
//   Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions),
//   Frame's Name does not match its entry function frame's name.
//
// As reading and processing the pcs in a stack trace one by one (from leaf to the root),
// we use pcDeck to temporarily hold the observed pcs and their expanded frames
// until we observe the entry function frame.
type pcDeck struct {
	pcs             []uintptr       // pcs accumulated so far, leaf first
	frames          []runtime.Frame // frames expanded from pcs (one pc may yield several)
	symbolizeResult symbolizeFlag   // union of symbolization results for the held pcs
}
   490  
   491  func (d *pcDeck) reset() {
   492  	d.pcs = d.pcs[:0]
   493  	d.frames = d.frames[:0]
   494  	d.symbolizeResult = 0
   495  }
   496  
   497  // tryAdd tries to add the pc and Frames expanded from it (most likely one,
   498  // since the stack trace is already fully expanded) and the symbolizeResult
   499  // to the deck. If it fails the caller needs to flush the deck and retry.
   500  func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) {
   501  	if existing := len(d.pcs); existing > 0 {
   502  		// 'frames' are all expanded from one 'pc' and represent all inlined functions
   503  		// so we check only the last one.
   504  		newFrame := frames[0]
   505  		last := d.frames[existing-1]
   506  		if last.Func != nil { // the last frame can't be inlined. Flush.
   507  			return false
   508  		}
   509  		if last.Entry == 0 || newFrame.Entry == 0 { // Possibly not a Go function. Don't try to merge.
   510  			return false
   511  		}
   512  
   513  		if last.Entry != newFrame.Entry { // newFrame is for a different function.
   514  			return false
   515  		}
   516  		if last.Function == newFrame.Function { // maybe recursion.
   517  			return false
   518  		}
   519  	}
   520  	d.pcs = append(d.pcs, pc)
   521  	d.frames = append(d.frames, frames...)
   522  	d.symbolizeResult |= symbolizeResult
   523  	return true
   524  }
   525  
// emitLocation emits the new location and function information recorded in the deck
// and returns the location ID encoded in the profile protobuf.
// It emits to b.pb, so there must be no message encoding in progress.
// It resets the deck.
// A return of 0 means the deck was empty and nothing was emitted
// (location IDs are 1-based).
func (b *profileBuilder) emitLocation() uint64 {
	if len(b.deck.pcs) == 0 {
		return 0
	}
	defer b.deck.reset()

	addr := b.deck.pcs[0]
	firstFrame := b.deck.frames[0]

	// We can't write out functions while in the middle of the
	// Location message, so record new functions we encounter and
	// write them out after the Location.
	type newFunc struct {
		id         uint64
		name, file string
	}
	newFuncs := make([]newFunc, 0, 8)

	// Cache the location under its leading pc before encoding, so later
	// stacks sharing this prefix reuse the same ID.
	id := uint64(len(b.locs)) + 1
	b.locs[addr] = locInfo{id: id, pcs: append([]uintptr{}, b.deck.pcs...)}

	start := b.pb.startMessage()
	b.pb.uint64Opt(tagLocation_ID, id)
	b.pb.uint64Opt(tagLocation_Address, uint64(firstFrame.PC))
	for _, frame := range b.deck.frames {
		// Write out each line in frame expansion.
		// Function IDs are 1-based; 0 from the map means "not seen yet".
		funcID := uint64(b.funcs[frame.Function])
		if funcID == 0 {
			funcID = uint64(len(b.funcs)) + 1
			b.funcs[frame.Function] = int(funcID)
			newFuncs = append(newFuncs, newFunc{funcID, frame.Function, frame.File})
		}
		b.pbLine(tagLocation_Line, funcID, int64(frame.Line))
	}
	// Find the mapping containing addr (or the fake entry) and record
	// the symbolization outcome on it.
	for i := range b.mem {
		if b.mem[i].start <= addr && addr < b.mem[i].end || b.mem[i].fake {
			b.pb.uint64Opt(tagLocation_MappingID, uint64(i+1))

			m := b.mem[i]
			m.funcs |= b.deck.symbolizeResult
			b.mem[i] = m
			break
		}
	}
	b.pb.endMessage(tagProfile_Location, start)

	// Write out functions we found during frame expansion.
	for _, fn := range newFuncs {
		start := b.pb.startMessage()
		b.pb.uint64Opt(tagFunction_ID, fn.id)
		b.pb.int64Opt(tagFunction_Name, b.stringIndex(fn.name))
		b.pb.int64Opt(tagFunction_SystemName, b.stringIndex(fn.name))
		b.pb.int64Opt(tagFunction_Filename, b.stringIndex(fn.file))
		b.pb.endMessage(tagProfile_Function, start)
	}

	b.flush()
	return id
}
   589  
   590  // readMapping reads /proc/self/maps and writes mappings to b.pb.
   591  // It saves the address ranges of the mappings in b.mem for use
   592  // when emitting locations.
   593  func (b *profileBuilder) readMapping() {
   594  	data, _ := ioutil.ReadFile("/proc/self/maps")
   595  	parseProcSelfMaps(data, b.addMapping)
   596  	if len(b.mem) == 0 { // pprof expects a map entry, so fake one.
   597  		b.addMappingEntry(0, 0, 0, "", "", true)
   598  		// TODO(hyangah): make addMapping return *memMap or
   599  		// take a memMap struct, and get rid of addMappingEntry
   600  		// that takes a bunch of positional arguments.
   601  	}
   602  }
   603  
// parseProcSelfMaps parses the contents of a /proc/self/maps file and
// calls addMapping for each executable ("x" permission) mapping found,
// passing its address range, file offset, path, and ELF build ID.
// Unparseable or non-executable lines are skipped silently.
func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, buildID string)) {
	// $ cat /proc/self/maps
	// 00400000-0040b000 r-xp 00000000 fc:01 787766                             /bin/cat
	// 0060a000-0060b000 r--p 0000a000 fc:01 787766                             /bin/cat
	// 0060b000-0060c000 rw-p 0000b000 fc:01 787766                             /bin/cat
	// 014ab000-014cc000 rw-p 00000000 00:00 0                                  [heap]
	// 7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064                    /usr/lib/locale/locale-archive
	// 7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0
	// 7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0
	// 7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0
	// 7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0
	// 7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0                          [stack]
	// 7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0                          [vdso]
	// ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]

	var line []byte
	// next removes and returns the next field in the line.
	// It also removes from line any spaces following the field.
	next := func() []byte {
		j := bytes.IndexByte(line, ' ')
		if j < 0 {
			// Last field on the line: consume the rest.
			f := line
			line = nil
			return f
		}
		f := line[:j]
		line = line[j+1:]
		for len(line) > 0 && line[0] == ' ' {
			line = line[1:]
		}
		return f
	}

	for len(data) > 0 {
		// Split off one line (the final line may lack a newline).
		i := bytes.IndexByte(data, '\n')
		if i < 0 {
			line, data = data, nil
		} else {
			line, data = data[:i], data[i+1:]
		}
		// First field: "lo-hi" hexadecimal address range.
		addr := next()
		i = bytes.IndexByte(addr, '-')
		if i < 0 {
			continue
		}
		lo, err := strconv.ParseUint(string(addr[:i]), 16, 64)
		if err != nil {
			continue
		}
		hi, err := strconv.ParseUint(string(addr[i+1:]), 16, 64)
		if err != nil {
			continue
		}
		perm := next()
		if len(perm) < 4 || perm[2] != 'x' {
			// Only interested in executable mappings.
			continue
		}
		offset, err := strconv.ParseUint(string(next()), 16, 64)
		if err != nil {
			continue
		}
		next()          // dev
		inode := next() // inode
		if line == nil {
			// No file field at all; nothing to report.
			continue
		}
		file := string(line)

		// Trim deleted file marker.
		deletedStr := " (deleted)"
		deletedLen := len(deletedStr)
		if len(file) >= deletedLen && file[len(file)-deletedLen:] == deletedStr {
			file = file[:len(file)-deletedLen]
		}

		if len(inode) == 1 && inode[0] == '0' && file == "" {
			// Huge-page text mappings list the initial fragment of
			// mapped but unpopulated memory as being inode 0.
			// Don't report that part.
			// But [vdso] and [vsyscall] are inode 0, so let non-empty file names through.
			continue
		}

		// TODO: pprof's remapMappingIDs makes two adjustments:
		// 1. If there is an /anon_hugepage mapping first and it is
		// consecutive to a next mapping, drop the /anon_hugepage.
		// 2. If start-offset = 0x400000, change start to 0x400000 and offset to 0.
		// There's no indication why either of these is needed.
		// Let's try not doing these and see what breaks.
		// If we do need them, they would go here, before we
		// enter the mappings into b.mem in the first place.

		buildID, _ := elfBuildID(file)
		addMapping(lo, hi, offset, file, buildID)
	}
}
   708  
// addMapping records a real (non-fake) executable mapping in b.mem.
// It matches the callback signature expected by parseProcSelfMaps.
func (b *profileBuilder) addMapping(lo, hi, offset uint64, file, buildID string) {
	b.addMappingEntry(lo, hi, offset, file, buildID, false)
}
   712  
   713  func (b *profileBuilder) addMappingEntry(lo, hi, offset uint64, file, buildID string, fake bool) {
   714  	b.mem = append(b.mem, memMap{
   715  		start:   uintptr(lo),
   716  		end:     uintptr(hi),
   717  		offset:  offset,
   718  		file:    file,
   719  		buildID: buildID,
   720  		fake:    fake,
   721  	})
   722  }