github.com/grafana/pyroscope-go/godeltaprof@v0.1.8-0.20240513050943-1b1f97373e2a/internal/pprof/delta_heap.go

package pprof

import (
	"io"
	"math"
	"runtime"
	"strings"
)

type DeltaHeapProfiler struct {
	// m remembers the counters last written for each (stack, block size)
	// pair so that allocation deltas can be computed on the next write.
	m profMap
	// mem holds the process memory mappings used by the profile builder;
	// with Options.LazyMapping set they are read once and then reused.
	mem []memMap
	// Options configures the underlying profile builder.
	Options ProfileBuilderOptions
}

// WriteHeapProto writes the heap profile records in p in protobuf format to w.
// Allocation counts and bytes are reported as deltas against the values recorded
// on the previous call for the same stacks, while in-use values are written
// as-is. An illustrative caller follows the function.
func (d *DeltaHeapProfiler) WriteHeapProto(w io.Writer, p []runtime.MemProfileRecord, rate int64, defaultSampleType string) error {
	if d.mem == nil || !d.Options.LazyMapping {
		d.mem = readMapping()
	}
	b := newProfileBuilder(w, d.Options, d.mem)
	b.pbValueType(tagProfile_PeriodType, "space", "bytes")
	b.pb.int64Opt(tagProfile_Period, rate)
	b.pbValueType(tagProfile_SampleType, "alloc_objects", "count")
	b.pbValueType(tagProfile_SampleType, "alloc_space", "bytes")
	b.pbValueType(tagProfile_SampleType, "inuse_objects", "count")
	b.pbValueType(tagProfile_SampleType, "inuse_space", "bytes")
	if defaultSampleType != "" {
		b.pb.int64Opt(tagProfile_DefaultSampleType, b.stringIndex(defaultSampleType))
	}

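	// values holds one entry per sample type declared above, in order:
	// alloc_objects, alloc_space, inuse_objects, inuse_space.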
	values := []int64{0, 0, 0, 0}
	var locs []uint64
	for _, r := range p {
		// Compute the delta against the values previously reported for this stack.
		if r.AllocBytes == 0 && r.AllocObjects == 0 && r.FreeObjects == 0 && r.FreeBytes == 0 {
			// This is a fresh bucket; it will be published after the next 1-2 GC cycles.
			continue
		}
		var blockSize int64
		if r.AllocObjects > 0 {
			blockSize = r.AllocBytes / r.AllocObjects
		}
		entry := d.m.Lookup(r.Stack(), uintptr(blockSize))

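		// entry.count.v1 and entry.count.v2 hold the cumulative AllocObjects and
		// AllocBytes written for this (stack, block size) pair on the previous
		// call, so the subtractions below yield the allocations made since then.
		// In-use values are gauges and are taken from the current record as-is.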
		if (r.AllocObjects - entry.count.v1) < 0 {
			continue
		}
		AllocObjects := r.AllocObjects - entry.count.v1
		AllocBytes := r.AllocBytes - entry.count.v2
		entry.count.v1 = r.AllocObjects
		entry.count.v2 = r.AllocBytes

		values[0], values[1] = scaleHeapSample(AllocObjects, AllocBytes, rate)
		values[2], values[3] = scaleHeapSample(r.InUseObjects(), r.InUseBytes(), rate)

		if values[0] == 0 && values[1] == 0 && values[2] == 0 && values[3] == 0 {
			continue
		}

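		// Trim leading runtime.* frames so the sample is attributed to user code;
		// if the first pass yields no locations, retry once with the full stack.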
		hideRuntime := true
		for tries := 0; tries < 2; tries++ {
			stk := r.Stack()
			// For heap profiles, all stack
			// addresses are return PCs, which is
			// what appendLocsForStack expects.
			if hideRuntime {
				for i, addr := range stk {
					if f := runtime.FuncForPC(addr); f != nil && strings.HasPrefix(f.Name(), "runtime.") {
						continue
					}
					// Found non-runtime. Show any runtime uses above it.
					stk = stk[i:]
					break
				}
			}
			locs = b.appendLocsForStack(locs[:0], stk)
			if len(locs) > 0 {
				break
			}
			hideRuntime = false // try again, and show all frames next time.
		}

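		// Record the average block size for this bucket under the "bytes" label,
		// as the runtime heap profile encoding also does.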
		b.pbSample(values, locs, func() {
			if blockSize != 0 {
				b.pbLabel(tagSample_Label, "bytes", "", blockSize)
			}
		})
	}
	b.build()
	return nil
}
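
// writeDeltaHeapExample is an added illustration, not part of the original
// godeltaprof source: a minimal sketch of how WriteHeapProto could be driven
// from runtime.MemProfile within this package. The function name and the
// slack of 50 extra records are assumptions; the library's real entry points
// live outside this internal package.
func writeDeltaHeapExample(d *DeltaHeapProfiler, w io.Writer) error {
	// Grow the record slice until runtime.MemProfile reports a complete
	// snapshot (ok == true), the usual idiom from its documentation.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for !ok {
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
	}
	p = p[:n]
	// Scale by the sampling rate in effect for this process; an empty
	// defaultSampleType leaves default_sample_type unset in the profile.
	return d.WriteHeapProto(w, p, int64(runtime.MemProfileRate), "")
}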

// scaleHeapSample adjusts the data from a heap sample to
// account for its probability of appearing in the collected
// data. Heap profiles are a sampling of the memory allocation
// requests in a program. We estimate the unsampled value by dividing
// each collected sample by its probability of appearing in the
// profile. Heap profiles rely on a Poisson process to determine
// which samples to collect, based on the desired average collection
// rate R. The probability that a sample of size S appears in the
// profile is 1-exp(-S/R). A worked numeric example follows the function.
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	if count == 0 || size == 0 {
		return 0, 0
	}

	if rate <= 1 {
		// if rate==1 all samples were collected so no adjustment is needed.
		// if rate<1 treat as unknown and skip scaling.
		return count, size
	}

	avgSize := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))

	return int64(float64(count) * scale), int64(float64(size) * scale)
}
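
// Worked example (added illustration, not part of the original source): with
// the default runtime.MemProfileRate of 512 KiB and an average sampled block
// size of 4 KiB, avgSize/rate = 4096/524288 = 1/128, so
//
//	scale = 1 / (1 - exp(-1.0/128)) ≈ 128.5
//
// i.e. each sampled 4 KiB block stands for roughly 128.5 allocations (about
// 514 KiB) in the scaled profile. As the average sample size approaches the
// rate, the factor falls toward 1/(1-exp(-1)) ≈ 1.58, and for much larger
// samples it tends to 1, since large allocations are almost always sampled.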