github.com/pyroscope-io/godeltaprof@v0.1.3-0.20230906152420-0d7eeca7b8c1/internal/pprof/delta_heap.go

package pprof

import (
	"io"
	"math"
	"runtime"
	"strings"
)

type DeltaHeapProfiler struct {
	m profMap
}

// WriteHeapProto writes the current heap profile in protobuf format to w.
func (d *DeltaHeapProfiler) WriteHeapProto(w io.Writer, p []runtime.MemProfileRecord, rate int64, defaultSampleType string) error {
	b := newProfileBuilder(w)
	b.pbValueType(tagProfile_PeriodType, "space", "bytes")
	b.pb.int64Opt(tagProfile_Period, rate)
	b.pbValueType(tagProfile_SampleType, "alloc_objects", "count")
	b.pbValueType(tagProfile_SampleType, "alloc_space", "bytes")
	b.pbValueType(tagProfile_SampleType, "inuse_objects", "count")
	b.pbValueType(tagProfile_SampleType, "inuse_space", "bytes")
	if defaultSampleType != "" {
		b.pb.int64Opt(tagProfile_DefaultSampleType, b.stringIndex(defaultSampleType))
	}

	values := []int64{0, 0, 0, 0}
	var locs []uint64
	for _, r := range p {
		hideRuntime := true
		for tries := 0; tries < 2; tries++ {
			stk := r.Stack()
			// For heap profiles, all stack
			// addresses are return PCs, which is
			// what appendLocsForStack expects.
			if hideRuntime {
				for i, addr := range stk {
					if f := runtime.FuncForPC(addr); f != nil && strings.HasPrefix(f.Name(), "runtime.") {
						continue
					}
					// Found non-runtime. Show any runtime uses above it.
					stk = stk[i:]
					break
				}
			}
			locs = b.appendLocsForStack(locs[:0], stk)
			if len(locs) > 0 {
				break
			}
			hideRuntime = false // try again, and show all frames next time.
		}

		// do the delta
		if r.AllocBytes == 0 && r.AllocObjects == 0 && r.FreeObjects == 0 && r.FreeBytes == 0 {
			// it is a fresh bucket and it will be published after next 1-2 gc cycles
			continue
		}
		var blockSize int64
		if r.AllocObjects > 0 {
			blockSize = r.AllocBytes / r.AllocObjects
		}
		entry := d.m.Lookup(r.Stack(), uintptr(blockSize))

		if (r.AllocObjects - entry.count.v1) < 0 {
			continue
		}
		AllocObjects := r.AllocObjects - entry.count.v1
		AllocBytes := r.AllocBytes - entry.count.v2
		entry.count.v1 = r.AllocObjects
		entry.count.v2 = r.AllocBytes

		values[0], values[1] = scaleHeapSample(AllocObjects, AllocBytes, rate)
		values[2], values[3] = scaleHeapSample(r.InUseObjects(), r.InUseBytes(), rate)

		if values[0] == 0 && values[1] == 0 && values[2] == 0 && values[3] == 0 {
			continue
		}

		b.pbSample(values, locs, func() {
			if blockSize != 0 {
				b.pbLabel(tagSample_Label, "bytes", "", blockSize)
			}
		})
	}
	b.build()
	return nil
}

// scaleHeapSample adjusts the data from a heap Sample to
// account for its probability of appearing in the collected
// data. heap profiles are a sampling of the memory allocation
// requests in a program. We estimate the unsampled value by dividing
// each collected sample by its probability of appearing in the
// profile. heap profiles rely on a Poisson process to determine
// which samples to collect, based on the desired average collection
// rate R. The probability of a sample of size S appearing in that
// profile is 1-exp(-S/R).
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	if count == 0 || size == 0 {
		return 0, 0
	}

	if rate <= 1 {
		// if rate==1 all samples were collected so no adjustment is needed.
		// if rate<1 treat as unknown and skip scaling.
		return count, size
	}

	avgSize := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))

	return int64(float64(count) * scale), int64(float64(size) * scale)
}
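
A minimal usage sketch (not part of the file above), assuming it lives in the same internal pprof package: it grows a []runtime.MemProfileRecord until runtime.MemProfile reports a complete snapshot, then asks a DeltaHeapProfiler to emit the delta profile. The helper name writeDeltaHeap is illustrative only; runtime.MemProfileRate supplies the sampling rate the records were collected with.

package pprof

import (
	"io"
	"runtime"
)

// writeDeltaHeap is a hypothetical driver for WriteHeapProto; it is not
// part of godeltaprof's API.
func writeDeltaHeap(w io.Writer, d *DeltaHeapProfiler) error {
	// Ask for the required slice length first, then retry with some
	// headroom until runtime.MemProfile captures every record.
	n, _ := runtime.MemProfile(nil, true)
	var records []runtime.MemProfileRecord
	for {
		records = make([]runtime.MemProfileRecord, n+50)
		var ok bool
		n, ok = runtime.MemProfile(records, true)
		if ok {
			records = records[:n]
			break
		}
	}
	// runtime.MemProfileRate is the sampling period (bytes per sample)
	// used to collect the records; "" keeps the default sample type.
	return d.WriteHeapProto(w, records, int64(runtime.MemProfileRate), "")
}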