github.com/grafana/pyroscope@v1.18.0/pkg/phlaredb/symdb/partition_memory.go (about)

     1  package symdb
     2  
     3  import (
     4  	"context"
     5  	"io"
     6  	"sync"
     7  
     8  	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
     9  )
    10  
// PartitionWriter accumulates the symbols of a single symdb partition in
// memory: the stack trace tree plus deduplicated strings, mappings,
// functions, and locations.
type PartitionWriter struct {
	header PartitionHeader

	// Stack traces are kept as a prefix tree; the remaining tables are
	// append-only slices with hash-based deduplication.
	stacktraces *stacktraces
	strings     deduplicatingSlice[string, string, *stringsHelper]
	mappings    deduplicatingSlice[schemav1.InMemoryMapping, mappingsKey, *mappingsHelper]
	functions   deduplicatingSlice[schemav1.InMemoryFunction, functionsKey, *functionsHelper]
	locations   deduplicatingSlice[schemav1.InMemoryLocation, locationsKey, *locationsHelper]
}
    20  
// AppendStacktraces inserts the given stack traces into the partition's
// stack trace tree and writes the assigned IDs to dst, which must have
// at least len(s) elements.
func (p *PartitionWriter) AppendStacktraces(dst []uint32, s []*schemav1.Stacktrace) {
	p.stacktraces.append(dst, s)
}
    24  
    25  func (p *PartitionWriter) ResolveStacktraceLocations(_ context.Context, dst StacktraceInserter, stacktraces []uint32) error {
    26  	if len(stacktraces) == 0 {
    27  		return nil
    28  	}
    29  	return p.stacktraces.resolve(dst, stacktraces)
    30  }
    31  
    32  func (p *PartitionWriter) LookupLocations(dst []uint64, stacktraceID uint32) []uint64 {
    33  	dst = dst[:0]
    34  	if stacktraceID == 0 {
    35  		return dst
    36  	}
    37  	return p.stacktraces.tree.resolveUint64(dst, stacktraceID)
    38  }
    39  
    40  func newStacktraces() *stacktraces {
    41  	p := &stacktraces{
    42  		hashToIdx: make(map[uint64]uint32),
    43  		tree:      newStacktraceTree(defaultStacktraceTreeSize),
    44  	}
    45  	return p
    46  }
    47  
// stacktraces stores stack traces as a prefix tree, with a hash index
// for O(1) lookup of already-inserted stacks. All fields are guarded
// by m.
type stacktraces struct {
	m         sync.RWMutex
	hashToIdx map[uint64]uint32 // hashLocations(stack) -> tree node ID
	tree      *stacktraceTree
	stacks    uint32
}
    54  
    55  func (p *stacktraces) size() uint64 {
    56  	p.m.RLock()
    57  	// TODO: map footprint isn't accounted
    58  	v := stacktraceTreeNodeSize * cap(p.tree.nodes)
    59  	p.m.RUnlock()
    60  	return uint64(v)
    61  }
    62  
// append resolves an ID for every stack trace in s, inserting missing
// stacks into the tree, and stores the IDs in dst (which must have at
// least len(s) elements). Safe for concurrent use.
func (p *stacktraces) append(dst []uint32, s []*schemav1.Stacktrace) {
	if len(s) == 0 {
		return
	}

	var (
		id     uint32
		found  bool
		misses int
	)

	// Fast path: probe the hash index under the read lock and count
	// how many stacks are not known yet.
	p.m.RLock()
	for i, x := range s {
		if dst[i], found = p.hashToIdx[hashLocations(x.LocationIDs)]; !found {
			misses++
		}
	}

	p.m.RUnlock()
	if misses == 0 {
		return
	}

	// NOTE(kolesnikovae):
	//
	// Maybe we don't need this map at all: tree insertion might be
	// done in a thread safe fashion, and optimised to the extent
	// that its performance is comparable with:
	//   map_read + r_(un)lock + map_overhead +
	//   miss_rate * (map_write + w_(un)lock)
	//
	// Instead of inserting stacks one by one, it is better to
	// build a tree, and merge it to the existing one.

	// Slow path: another writer may have inserted some of the missing
	// stacks between the RUnlock above and this Lock; re-inserting is
	// harmless because tree insertion is idempotent (see below).
	p.m.Lock()
	defer p.m.Unlock()
	for i, v := range dst[:len(s)] {
		if v != 0 {
			// Already resolved. ID 0 is reserved
			// as it is the tree root.
			continue
		}
		x := s[i].LocationIDs
		// Tree insertion is idempotent,
		// we don't need to check the map.
		id = p.tree.insert(x)
		h := hashLocations(x)
		p.hashToIdx[h] = id
		dst[i] = id
	}
}
   114  
// defaultStacktraceDepth is the initial capacity of the pooled []int32
// scratch buffers used when resolving stack traces.
const defaultStacktraceDepth = 64

// stacktraceLocations pools scratch buffers so that resolve calls do
// not allocate a fresh slice each time.
var stacktraceLocations = stacktraceLocationsPool{
	Pool: sync.Pool{New: func() any { return make([]int32, 0, defaultStacktraceDepth) }},
}
   120  
   121  type stacktraceLocationsPool struct{ sync.Pool }
   122  
   123  func (p *stacktraceLocationsPool) get() []int32 {
   124  	return stacktraceLocations.Get().([]int32)
   125  }
   126  
   127  func (p *stacktraceLocationsPool) put(x []int32) {
   128  	stacktraceLocations.Put(x)
   129  }
   130  
   131  func (p *stacktraces) resolve(dst StacktraceInserter, stacktraces []uint32) (err error) {
   132  	p.m.RLock()
   133  	t := stacktraceTree{nodes: p.tree.nodes}
   134  	// tree.resolve is thread safe: only the parent node index (p)
   135  	// and the reference to location (r) node fields are accessed,
   136  	// which are never modified after insertion.
   137  	//
   138  	// Nevertheless, the node slice header should be copied to avoid
   139  	// races when the slice grows: in the worst case, the underlying
   140  	// capacity will be retained and thus not be eligible for GC during
   141  	// the call.
   142  	p.m.RUnlock()
   143  	s := stacktraceLocations.get()
   144  	for _, sid := range stacktraces {
   145  		s = t.resolve(s, sid)
   146  		dst.InsertStacktrace(sid, s)
   147  	}
   148  	stacktraceLocations.put(s)
   149  	return nil
   150  }
   151  
// WriteTo serializes the stack trace tree to dst, returning the number
// of bytes written and any write error.
func (p *stacktraces) WriteTo(dst io.Writer) (int64, error) {
	return p.tree.WriteTo(dst)
}
   155  
// AppendLocations deduplicates and appends locations, writing the
// assigned IDs to dst (which must have at least len(locations) elements).
func (p *PartitionWriter) AppendLocations(dst []uint32, locations []schemav1.InMemoryLocation) {
	p.locations.append(dst, locations)
}
   159  
// AppendMappings deduplicates and appends mappings, writing the
// assigned IDs to dst (which must have at least len(mappings) elements).
func (p *PartitionWriter) AppendMappings(dst []uint32, mappings []schemav1.InMemoryMapping) {
	p.mappings.append(dst, mappings)
}
   163  
// AppendFunctions deduplicates and appends functions, writing the
// assigned IDs to dst (which must have at least len(functions) elements).
func (p *PartitionWriter) AppendFunctions(dst []uint32, functions []schemav1.InMemoryFunction) {
	p.functions.append(dst, functions)
}
   167  
// AppendStrings deduplicates and appends strings, writing the
// assigned IDs to dst (which must have at least len(strings) elements).
func (p *PartitionWriter) AppendStrings(dst []uint32, strings []string) {
	p.strings.append(dst, strings)
}
   171  
// Symbols returns a view of the partition's symbol tables. The slice
// fields are slice-header copies, detaching the view from subsequent
// growth of the underlying tables (same technique as in
// stacktraces.resolve).
func (p *PartitionWriter) Symbols() *Symbols {
	return &Symbols{
		Stacktraces: p,
		Locations:   p.locations.sliceHeaderCopy(),
		Mappings:    p.mappings.sliceHeaderCopy(),
		Functions:   p.functions.sliceHeaderCopy(),
		Strings:     p.strings.sliceHeaderCopy(),
	}
}
   181  
// WriteStats records the current sizes of the partition's symbol tables
// in s, taking each table's read lock in turn.
//
// NOTE(review): LocationsTotal and StringsTotal are accumulated (+=)
// into s while the other fields are overwritten (=) — presumably so
// those two aggregate across partitions; confirm with callers before
// unifying the style.
func (p *PartitionWriter) WriteStats(s *PartitionStats) {
	p.stacktraces.m.RLock()
	s.MaxStacktraceID = int(p.stacktraces.tree.len())
	s.StacktracesTotal = len(p.stacktraces.hashToIdx)
	p.stacktraces.m.RUnlock()

	p.mappings.lock.RLock()
	s.MappingsTotal = len(p.mappings.slice)
	p.mappings.lock.RUnlock()

	p.functions.lock.RLock()
	s.FunctionsTotal = len(p.functions.slice)
	p.functions.lock.RUnlock()

	p.locations.lock.RLock()
	s.LocationsTotal += len(p.locations.slice)
	p.locations.lock.RUnlock()

	p.strings.lock.RLock()
	s.StringsTotal += len(p.strings.slice)
	p.strings.lock.RUnlock()
}
   204  
// Release is a no-op: the in-memory writer holds no resources that
// need releasing. It exists to satisfy the PartitionReader interface.
func (p *PartitionWriter) Release() {
	// Noop. Satisfies PartitionReader interface.
}