github.com/grafana/pyroscope@v1.18.0/pkg/phlaredb/phlaredb.go (about)

     1  package phlaredb
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"flag"
     7  	"fmt"
     8  	"math"
     9  	"os"
    10  	"path/filepath"
    11  	"slices"
    12  	"sync"
    13  	"time"
    14  
    15  	"connectrpc.com/connect"
    16  	"github.com/dustin/go-humanize"
    17  	"github.com/go-kit/log"
    18  	"github.com/go-kit/log/level"
    19  	"github.com/google/uuid"
    20  	"github.com/grafana/dskit/multierror"
    21  	"github.com/grafana/dskit/services"
    22  	"github.com/oklog/ulid/v2"
    23  	"github.com/opentracing/opentracing-go"
    24  	"github.com/prometheus/common/model"
    25  	"github.com/samber/lo"
    26  
    27  	profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1"
    28  	ingestv1 "github.com/grafana/pyroscope/api/gen/proto/go/ingester/v1"
    29  	typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
    30  	phlaremodel "github.com/grafana/pyroscope/pkg/model"
    31  	phlareobj "github.com/grafana/pyroscope/pkg/objstore"
    32  	"github.com/grafana/pyroscope/pkg/phlaredb/block"
    33  	"github.com/grafana/pyroscope/pkg/phlaredb/symdb"
    34  	phlarecontext "github.com/grafana/pyroscope/pkg/pyroscope/context"
    35  	"github.com/grafana/pyroscope/pkg/util"
    36  )
    37  
const (
	// DefaultMinFreeDisk is the free-disk floor (GiB) below which the
	// retention policy starts reclaiming space (see RegisterFlags usage text).
	DefaultMinFreeDisk = 10
	// DefaultMinDiskAvailablePercentage is the fraction of disk capacity
	// to keep available.
	DefaultMinDiskAvailablePercentage = 0.05
	// DefaultRetentionPolicyEnforcementInterval is how often disk retention
	// is enforced.
	DefaultRetentionPolicyEnforcementInterval = 5 * time.Minute
	DefaultRetentionExpiry                    = 4 * time.Hour // Same as default `querier.query_store_after`.
)
    44  
// Config holds the local-storage configuration of PhlareDB.
// Defaults and flag names are defined in RegisterFlags.
type Config struct {
	// DataPath is the directory used for local storage.
	DataPath string `yaml:"data_path,omitempty"`
	// Blocks are generally cut once they reach 1000M of memory size, this will setup an upper limit to the duration of data that a block has that is cut by the ingester.
	MaxBlockDuration time.Duration `yaml:"max_block_duration,omitempty"`

	// RowGroupTargetSize is the target uncompressed size of a parquet row group
	// (per the flag usage text, roughly 128MiB compressed).
	RowGroupTargetSize uint64 `yaml:"row_group_target_size"`
	// SymbolsPartitionLabel is the dimension by which symbols are partitioned;
	// empty means partitioning is determined automatically.
	SymbolsPartitionLabel string `yaml:"symbols_partition_label"`

	// Those configs should not be exposed to the user, rather they should be determined by pyroscope itself.
	// Currently, they are solely used for test cases.
	Parquet     *ParquetConfig      `yaml:"-"`
	SymDBFormat symdb.FormatVersion `yaml:"-"`

	// Disk-based retention policy settings; see the
	// "pyroscopedb.retention-policy-*" flags for semantics and defaults.
	MinFreeDisk                uint64        `yaml:"min_free_disk_gb"`
	MinDiskAvailablePercentage float64       `yaml:"min_disk_available_percentage"`
	EnforcementInterval        time.Duration `yaml:"enforcement_interval"`
	DisableEnforcement         bool          `yaml:"disable_enforcement"`
}
    64  
// ParquetConfig controls sizing of the parquet data written per block.
// It is not user-facing; it is only overridden for test cases (see Config.Parquet).
type ParquetConfig struct {
	MaxBufferRowCount int    // presumably the max rows buffered before a row group is written — confirm against row writer
	MaxRowGroupBytes  uint64 // This is the maximum row group size in bytes that the raw data uses in memory.
	MaxBlockBytes     uint64 // This is the size of all parquet tables in memory after which a new block is cut
}
    70  
// RegisterFlags registers all PhlareDB configuration flags with f.
// All flags are prefixed with "pyroscopedb.".
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
	f.StringVar(&cfg.DataPath, "pyroscopedb.data-path", "./data", "Directory used for local storage.")
	f.DurationVar(&cfg.MaxBlockDuration, "pyroscopedb.max-block-duration", 1*time.Hour, "Upper limit to the duration of a Pyroscope block.")
	f.Uint64Var(&cfg.RowGroupTargetSize, "pyroscopedb.row-group-target-size", 10*128*1024*1024, "How big should a single row group be uncompressed") // This should roughly be 128MiB compressed
	f.StringVar(&cfg.SymbolsPartitionLabel, "pyroscopedb.symbols-partition-label", "", "Specifies the dimension by which symbols are partitioned. By default, the partitioning is determined automatically.")
	f.Uint64Var(&cfg.MinFreeDisk, "pyroscopedb.retention-policy-min-free-disk-gb", DefaultMinFreeDisk, "How much available disk space to keep in GiB")
	f.Float64Var(&cfg.MinDiskAvailablePercentage, "pyroscopedb.retention-policy-min-disk-available-percentage", DefaultMinDiskAvailablePercentage, "Which percentage of free disk space to keep")
	f.DurationVar(&cfg.EnforcementInterval, "pyroscopedb.retention-policy-enforcement-interval", DefaultRetentionPolicyEnforcementInterval, "How often to enforce disk retention")
	f.BoolVar(&cfg.DisableEnforcement, "pyroscopedb.retention-policy-disable", false, "Disable retention policy enforcement")
}
    81  
// TenantLimiter enforces per-tenant ingestion limits.
type TenantLimiter interface {
	// AllowProfile decides whether a profile with the given fingerprint,
	// labels and timestamp (nanoseconds) may be ingested; a non-nil error
	// rejects the profile.
	AllowProfile(fp model.Fingerprint, lbs phlaremodel.Labels, tsNano int64) error
	// Stop releases resources held by the limiter; called on PhlareDB.Close.
	Stop()
}
    86  
// PhlareDB is a local profile database: profiles are ingested into
// in-memory heads (one per fixed time range), flushed to local disk as
// blocks, and queried through both the heads and the blockQuerier.
type PhlareDB struct {
	services.Service

	logger    log.Logger
	phlarectx context.Context
	metrics   *headMetrics

	cfg    Config
	stopCh chan struct{}  // closed by Close to stop the background loop
	wg     sync.WaitGroup // tracks the background loop goroutine

	// headLock guards heads and flushing.
	headLock sync.RWMutex
	// Heads per max range interval for ingest requests and reads. May be empty,
	// if no ingestion requests were handled.
	heads map[int64]*Head
	// Read only heads. On Flush, writes are directed to
	// the new head, and queries can read the former head
	// till it gets written to the disk and becomes available
	// to blockQuerier.
	flushing []*Head

	// flushLock serializes flushes. Only one flush at a time
	// is allowed.
	flushLock sync.Mutex

	blockQuerier *BlockQuerier
	limiter      TenantLimiter
	evictCh      chan *blockEviction // eviction requests, served by the loop goroutine
}
   116  
   117  func New(phlarectx context.Context, cfg Config, limiter TenantLimiter, fs phlareobj.Bucket) (*PhlareDB, error) {
   118  	reg := phlarecontext.Registry(phlarectx)
   119  	f := &PhlareDB{
   120  		cfg:     cfg,
   121  		logger:  phlarecontext.Logger(phlarectx),
   122  		stopCh:  make(chan struct{}),
   123  		evictCh: make(chan *blockEviction),
   124  		metrics: newHeadMetrics(reg),
   125  		limiter: limiter,
   126  		heads:   make(map[int64]*Head),
   127  	}
   128  
   129  	if err := os.MkdirAll(f.LocalDataPath(), 0o777); err != nil {
   130  		return nil, fmt.Errorf("mkdir %s: %w", f.LocalDataPath(), err)
   131  	}
   132  
   133  	// ensure head metrics are registered early so they are reused for the new head
   134  	phlarectx = contextWithHeadMetrics(phlarectx, f.metrics)
   135  	f.phlarectx = phlarectx
   136  	f.wg.Add(1)
   137  	go f.loop()
   138  
   139  	f.blockQuerier = NewBlockQuerier(phlarectx, phlareobj.NewPrefixedBucket(fs, PathLocal))
   140  
   141  	// do an initial querier sync
   142  	ctx := context.Background()
   143  	if err := f.blockQuerier.Sync(ctx); err != nil {
   144  		return nil, err
   145  	}
   146  	return f, nil
   147  }
   148  
   149  func (f *PhlareDB) LocalDataPath() string {
   150  	return filepath.Join(f.cfg.DataPath, PathLocal)
   151  }
   152  
   153  func (f *PhlareDB) BlockMetas(ctx context.Context) ([]*block.Meta, error) {
   154  	return f.blockQuerier.BlockMetas(ctx)
   155  }
   156  
   157  func (f *PhlareDB) runBlockQuerierSync(ctx context.Context) {
   158  	if err := f.blockQuerier.Sync(ctx); err != nil {
   159  		level.Error(f.logger).Log("msg", "sync of blocks failed", "err", err)
   160  	}
   161  }
   162  
   163  func (f *PhlareDB) loop() {
   164  	blockScanTicker := time.NewTicker(5 * time.Minute)
   165  	headSizeCheck := time.NewTicker(5 * time.Second)
   166  	staleHeadTicker := time.NewTimer(util.DurationWithJitter(10*time.Minute, 0.5))
   167  	maxBlockBytes := f.maxBlockBytes()
   168  	defer func() {
   169  		blockScanTicker.Stop()
   170  		headSizeCheck.Stop()
   171  		staleHeadTicker.Stop()
   172  		f.wg.Done()
   173  	}()
   174  
   175  	for {
   176  		ctx := context.Background()
   177  
   178  		select {
   179  		case <-f.stopCh:
   180  			return
   181  		case <-blockScanTicker.C:
   182  			f.runBlockQuerierSync(ctx)
   183  		case <-headSizeCheck.C:
   184  			if f.headSize() > maxBlockBytes {
   185  				f.Flush(ctx, true, flushReasonMaxBlockBytes)
   186  			}
   187  		case <-staleHeadTicker.C:
   188  			f.Flush(ctx, false, flushReasonMaxDuration)
   189  			staleHeadTicker.Reset(util.DurationWithJitter(10*time.Minute, 0.5))
   190  		case e := <-f.evictCh:
   191  			f.evictBlock(e)
   192  		}
   193  	}
   194  }
   195  
   196  // Flush start flushing heads to disk.
   197  // When force is true, all heads are flushed.
   198  // When force is false, only stale heads are flushed.
   199  // see Head.isStale for the definition of stale.
   200  func (f *PhlareDB) Flush(ctx context.Context, force bool, reason string) (err error) {
   201  	// Ensure this is the only Flush running.
   202  	f.flushLock.Lock()
   203  	defer f.flushLock.Unlock()
   204  
   205  	currentSize := f.headSize()
   206  	f.headLock.Lock()
   207  	if len(f.heads) == 0 {
   208  		f.headLock.Unlock()
   209  		return nil
   210  	}
   211  
   212  	// sweep heads for flushing
   213  	f.flushing = make([]*Head, 0, len(f.heads))
   214  	for maxT, h := range f.heads {
   215  		// Skip heads that are not stale.
   216  		if h.isStale(maxT, time.Now()) || force {
   217  			f.flushing = append(f.flushing, h)
   218  			delete(f.heads, maxT)
   219  		}
   220  	}
   221  
   222  	if len(f.flushing) != 0 {
   223  		level.Debug(f.logger).Log(
   224  			"msg", "flushing heads to disk",
   225  			"reason", reason,
   226  			"max_size", humanize.Bytes(f.maxBlockBytes()),
   227  			"current_size", humanize.Bytes(currentSize),
   228  			"num_heads", len(f.flushing),
   229  		)
   230  	}
   231  
   232  	f.headLock.Unlock()
   233  	// lock is release flushing heads are available for queries in the flushing array.
   234  	// New heads can be created and written to while the flushing heads are being flushed.
   235  	errs := multierror.New()
   236  
   237  	// flush all heads and keep only successful ones
   238  	successful := lo.Filter(f.flushing, func(h *Head, index int) bool {
   239  		f.metrics.flushedBlocksReasons.WithLabelValues(reason).Inc()
   240  		if err := h.Flush(ctx); err != nil {
   241  			errs.Add(err)
   242  			return false
   243  		}
   244  		return true
   245  	})
   246  
   247  	// At this point we ensure that the data has been flushed on disk.
   248  	// Now we need to make it "visible" to queries, and close the old
   249  	// head once in-flight queries finish.
   250  	// TODO(kolesnikovae): Although the head move is supposed to be a quick
   251  	//  operation, consider making the lock more selective and block only
   252  	//  queries that target the old head.
   253  	f.headLock.Lock()
   254  	// Now that there are no in-flight queries we can move the head.
   255  	successful = lo.Filter(successful, func(h *Head, index int) bool {
   256  		if err := h.Move(); err != nil {
   257  			errs.Add(err)
   258  			return false
   259  		}
   260  		return true
   261  	})
   262  	// Add heads that were flushed and moved to the blockQuerier.
   263  	for _, h := range successful {
   264  		f.blockQuerier.AddBlockQuerierByMeta(h.meta)
   265  	}
   266  	f.flushing = nil
   267  	f.headLock.Unlock()
   268  	return err
   269  }
   270  
   271  func (f *PhlareDB) maxBlockDuration() time.Duration {
   272  	maxBlockDuration := 5 * time.Second
   273  	if f.cfg.MaxBlockDuration > maxBlockDuration {
   274  		maxBlockDuration = f.cfg.MaxBlockDuration
   275  	}
   276  	return maxBlockDuration
   277  }
   278  
   279  func (f *PhlareDB) maxBlockBytes() uint64 {
   280  	maxBlockBytes := defaultParquetConfig.MaxBlockBytes
   281  	if f.cfg.Parquet != nil && f.cfg.Parquet.MaxBlockBytes > 0 {
   282  		maxBlockBytes = f.cfg.Parquet.MaxBlockBytes
   283  	}
   284  	return maxBlockBytes
   285  }
   286  
   287  func (f *PhlareDB) evictBlock(e *blockEviction) {
   288  	defer close(e.done)
   289  	e.evicted, e.err = f.blockQuerier.evict(e.blockID)
   290  	if e.evicted && e.err == nil {
   291  		e.err = e.fn()
   292  	}
   293  }
   294  
// Close shuts the database down: it stops the background loop, flushes all
// remaining heads, closes the block querier, and stops the tenant limiter.
// The order matters: stopCh is closed and the loop goroutine awaited first,
// so no background flush or eviction races with the final head flushes.
// f.heads is read without headLock here, which assumes no concurrent
// ingestion at close time. Sending on evictCh after this point would block
// forever (or panic once the channel is closed) — see Evict's contract.
func (f *PhlareDB) Close() error {
	close(f.stopCh)
	f.wg.Wait()
	errs := multierror.New()
	for _, h := range f.heads {
		errs.Add(h.Flush(f.phlarectx))
	}
	close(f.evictCh)
	if err := f.blockQuerier.Close(); err != nil {
		errs.Add(err)
	}
	f.limiter.Stop()
	return errs.Err()
}
   309  
   310  func (f *PhlareDB) queriers() Queriers {
   311  	queriers := f.blockQuerier.Queriers()
   312  	head := f.headQueriers()
   313  	return append(queriers, head...)
   314  }
   315  
   316  func (f *PhlareDB) headQueriers() Queriers {
   317  	res := make(Queriers, 0, len(f.heads)+len(f.flushing))
   318  	for _, h := range f.heads {
   319  		res = append(res, h.Queriers()...)
   320  	}
   321  	for _, h := range f.flushing {
   322  		res = append(res, h.Queriers()...)
   323  	}
   324  	return res
   325  }
   326  
   327  func (f *PhlareDB) Ingest(ctx context.Context, p *profilev1.Profile, id uuid.UUID, annotations []*typesv1.ProfileAnnotation, externalLabels ...*typesv1.LabelPair) (err error) {
   328  	return f.headForIngest(p.TimeNanos, func(head *Head) error {
   329  		return head.Ingest(ctx, p, id, annotations, externalLabels...)
   330  	})
   331  }
   332  
   333  func endRangeForTimestamp(t, width int64) (maxt int64) {
   334  	return (t/width)*width + width
   335  }
   336  
// headForIngest returns the head assigned for the range where the given sampleTimeNanos falls.
// We hold multiple heads and assign them a fixed range of timestamps.
// This helps make block range fixed and predictable.
//
// Locking protocol: the common case (head already present) takes only the
// read lock and increments the head's in-flight counter while the lock is
// held. When the head is missing, we upgrade to the write lock and re-check
// the map, because another goroutine may have created the head between the
// RUnlock and the Lock.
func (f *PhlareDB) headForIngest(sampleTimeNanos int64, fn func(*Head) error) (err error) {
	// we use the maxT of fixed interval as the key to the head map
	maxT := endRangeForTimestamp(sampleTimeNanos, f.maxBlockDuration().Nanoseconds())
	// We need to keep track of the in-flight ingestion requests to ensure that none
	// of them will compete with Flush. Lock is acquired to avoid Add after Wait that
	// is called in the very beginning of Flush.
	f.headLock.RLock()
	if h := f.heads[maxT]; h != nil {
		h.inFlightProfiles.Add(1)
		f.headLock.RUnlock()
		// Done is deferred so the counter stays elevated for the whole ingest.
		defer h.inFlightProfiles.Done()
		return fn(h)
	}

	f.headLock.RUnlock()
	f.headLock.Lock()
	// Re-check under the write lock: the head may have been created meanwhile.
	head, ok := f.heads[maxT]
	if !ok {
		h, err := NewHead(f.phlarectx, f.cfg, f.limiter)
		if err != nil {
			f.headLock.Unlock()
			return err
		}
		head = h
		f.heads[maxT] = head
	}
	h := head
	h.inFlightProfiles.Add(1)
	f.headLock.Unlock()
	defer h.inFlightProfiles.Done()
	return fn(h)
}
   372  
   373  func (f *PhlareDB) headSize() uint64 {
   374  	f.headLock.RLock()
   375  	defer f.headLock.RUnlock()
   376  	size := uint64(0)
   377  	for _, h := range f.heads {
   378  		size += h.Size()
   379  	}
   380  	return size
   381  }
   382  
// Flush reasons, reported in the debug log and the flushedBlocksReasons metric.
const (
	flushReasonMaxDuration   = "max-duration"
	flushReasonMaxBlockBytes = "max-block-bytes"
)
   387  
   388  // LabelValues returns the possible label values for a given label name.
   389  func (f *PhlareDB) LabelValues(ctx context.Context, req *connect.Request[typesv1.LabelValuesRequest]) (resp *connect.Response[typesv1.LabelValuesResponse], err error) {
   390  	sp, ctx := opentracing.StartSpanFromContext(ctx, "PhlareDB LabelValues")
   391  	defer sp.Finish()
   392  
   393  	f.headLock.RLock()
   394  	defer f.headLock.RUnlock()
   395  
   396  	_, ok := phlaremodel.GetTimeRange(req.Msg)
   397  	if !ok {
   398  		return f.headQueriers().LabelValues(ctx, req)
   399  	}
   400  	return f.queriers().LabelValues(ctx, req)
   401  }
   402  
   403  // LabelNames returns the possible label names.
   404  func (f *PhlareDB) LabelNames(ctx context.Context, req *connect.Request[typesv1.LabelNamesRequest]) (*connect.Response[typesv1.LabelNamesResponse], error) {
   405  	sp, ctx := opentracing.StartSpanFromContext(ctx, "PhlareDB LabelNames")
   406  	defer sp.Finish()
   407  
   408  	f.headLock.RLock()
   409  	defer f.headLock.RUnlock()
   410  
   411  	_, ok := phlaremodel.GetTimeRange(req.Msg)
   412  	if !ok {
   413  		return f.headQueriers().LabelNames(ctx, req)
   414  	}
   415  	return f.queriers().LabelNames(ctx, req)
   416  }
   417  
   418  // ProfileTypes returns the possible profile types.
   419  func (f *PhlareDB) ProfileTypes(ctx context.Context, req *connect.Request[ingestv1.ProfileTypesRequest]) (resp *connect.Response[ingestv1.ProfileTypesResponse], err error) {
   420  	sp, ctx := opentracing.StartSpanFromContext(ctx, "PhlareDB ProfileTypes")
   421  	defer sp.Finish()
   422  
   423  	f.headLock.RLock()
   424  	defer f.headLock.RUnlock()
   425  
   426  	_, ok := phlaremodel.GetTimeRange(req.Msg)
   427  	if !ok {
   428  		return f.headQueriers().ProfileTypes(ctx, req)
   429  	}
   430  	return f.queriers().ProfileTypes(ctx, req)
   431  }
   432  
   433  // Series returns labels series for the given set of matchers.
   434  func (f *PhlareDB) Series(ctx context.Context, req *connect.Request[ingestv1.SeriesRequest]) (*connect.Response[ingestv1.SeriesResponse], error) {
   435  	sp, ctx := opentracing.StartSpanFromContext(ctx, "PhlareDB Series")
   436  	defer sp.Finish()
   437  
   438  	f.headLock.RLock()
   439  	defer f.headLock.RUnlock()
   440  
   441  	_, ok := phlaremodel.GetTimeRange(req.Msg)
   442  	if !ok {
   443  		return f.headQueriers().Series(ctx, req)
   444  	}
   445  	return f.queriers().Series(ctx, req)
   446  }
   447  
// MergeProfilesStacktraces serves a bidirectional merge-by-stacktraces query
// over both heads and on-disk blocks. The head read lock is held for the
// whole stream so the queried heads are not swept by a concurrent Flush.
func (f *PhlareDB) MergeProfilesStacktraces(ctx context.Context, stream *connect.BidiStream[ingestv1.MergeProfilesStacktracesRequest, ingestv1.MergeProfilesStacktracesResponse]) error {
	f.headLock.RLock()
	defer f.headLock.RUnlock()

	return f.queriers().MergeProfilesStacktraces(ctx, stream)
}
   454  
// MergeProfilesLabels serves a bidirectional merge-by-labels query over both
// heads and on-disk blocks, holding the head read lock for the whole stream.
func (f *PhlareDB) MergeProfilesLabels(ctx context.Context, stream *connect.BidiStream[ingestv1.MergeProfilesLabelsRequest, ingestv1.MergeProfilesLabelsResponse]) error {
	f.headLock.RLock()
	defer f.headLock.RUnlock()

	return f.queriers().MergeProfilesLabels(ctx, stream)
}
   461  
// MergeProfilesPprof serves a bidirectional merge-to-pprof query over both
// heads and on-disk blocks, holding the head read lock for the whole stream.
func (f *PhlareDB) MergeProfilesPprof(ctx context.Context, stream *connect.BidiStream[ingestv1.MergeProfilesPprofRequest, ingestv1.MergeProfilesPprofResponse]) error {
	f.headLock.RLock()
	defer f.headLock.RUnlock()

	return f.queriers().MergeProfilesPprof(ctx, stream)
}
   468  
// MergeSpanProfile serves a bidirectional span-profile merge query over both
// heads and on-disk blocks, holding the head read lock for the whole stream.
func (f *PhlareDB) MergeSpanProfile(ctx context.Context, stream *connect.BidiStream[ingestv1.MergeSpanProfileRequest, ingestv1.MergeSpanProfileResponse]) error {
	f.headLock.RLock()
	defer f.headLock.RUnlock()

	return f.queriers().MergeSpanProfile(ctx, stream)
}
   475  
// blockEviction is a request to remove a local block from the block querier.
// It is sent on PhlareDB.evictCh and processed by the loop goroutine; see
// PhlareDB.Evict.
type blockEviction struct {
	blockID ulid.ULID
	err     error // outcome of the eviction and, when run, of fn
	evicted bool  // whether the block was actually removed from the querier
	// fn is invoked only after a successful eviction (see evictBlock).
	fn func() error
	// done is closed when the request has been fully processed.
	done chan struct{}
}
   483  
// Evict removes the given local block from the PhlareDB.
// Note that the block files are not deleted from the disk.
// No evictions should be done after and during the Close call.
// The request is handed to the loop goroutine via evictCh, and the call
// blocks until the eviction (and fn, on success) has completed.
func (f *PhlareDB) Evict(blockID ulid.ULID, fn func() error) (bool, error) {
	e := &blockEviction{
		blockID: blockID,
		done:    make(chan struct{}),
		fn:      fn,
	}
	// It's assumed that the DB close is only called
	// after all evictions are done, therefore it's safe
	// to block here.
	f.evictCh <- e
	<-e.done
	return e.evicted, e.err
}
   500  
   501  func (f *PhlareDB) BlockMetadata(ctx context.Context, req *connect.Request[ingestv1.BlockMetadataRequest]) (*connect.Response[ingestv1.BlockMetadataResponse], error) {
   502  
   503  	var result ingestv1.BlockMetadataResponse
   504  
   505  	appendInRange := func(q TimeBounded, meta *block.Meta) {
   506  		if !InRange(q, model.Time(req.Msg.Start), model.Time(req.Msg.End)) {
   507  			return
   508  		}
   509  		var info typesv1.BlockInfo
   510  		meta.WriteBlockInfo(&info)
   511  		result.Blocks = append(result.Blocks, &info)
   512  	}
   513  
   514  	f.headLock.RLock()
   515  	for _, h := range f.heads {
   516  		appendInRange(h, h.meta)
   517  	}
   518  	for _, h := range f.flushing {
   519  		appendInRange(h, h.meta)
   520  	}
   521  	f.headLock.RUnlock()
   522  
   523  	f.blockQuerier.queriersLock.RLock()
   524  	for _, q := range f.blockQuerier.queriers {
   525  		appendInRange(q, q.meta)
   526  	}
   527  	f.blockQuerier.queriersLock.RUnlock()
   528  
   529  	// blocks move from heads to flushing to blockQuerier, so we need to check if that might have happened and caused a duplicate
   530  	result.Blocks = lo.UniqBy(result.Blocks, func(b *typesv1.BlockInfo) string {
   531  		return b.Ulid
   532  	})
   533  
   534  	return connect.NewResponse(&result), nil
   535  }
   536  
   537  func (f *PhlareDB) GetProfileStats(ctx context.Context, req *connect.Request[typesv1.GetProfileStatsRequest]) (*connect.Response[typesv1.GetProfileStatsResponse], error) {
   538  	sp, _ := opentracing.StartSpanFromContext(ctx, "PhlareDB GetProfileStats")
   539  	defer sp.Finish()
   540  
   541  	minTimes := make([]model.Time, 0)
   542  	maxTimes := make([]model.Time, 0)
   543  
   544  	f.headLock.RLock()
   545  	for _, h := range f.heads {
   546  		minT, maxT := h.Bounds()
   547  		minTimes = append(minTimes, minT)
   548  		maxTimes = append(maxTimes, maxT)
   549  	}
   550  	for _, h := range f.flushing {
   551  		minT, maxT := h.Bounds()
   552  		minTimes = append(minTimes, minT)
   553  		maxTimes = append(maxTimes, maxT)
   554  	}
   555  	f.headLock.RUnlock()
   556  
   557  	f.blockQuerier.queriersLock.RLock()
   558  	for _, q := range f.blockQuerier.queriers {
   559  		minT, maxT := q.Bounds()
   560  		minTimes = append(minTimes, minT)
   561  		maxTimes = append(maxTimes, maxT)
   562  	}
   563  	f.blockQuerier.queriersLock.RUnlock()
   564  
   565  	response, err := getProfileStatsFromBounds(minTimes, maxTimes)
   566  	return connect.NewResponse(response), err
   567  }
   568  
   569  func getProfileStatsFromBounds(minTimes, maxTimes []model.Time) (*typesv1.GetProfileStatsResponse, error) {
   570  	if len(minTimes) != len(maxTimes) {
   571  		return nil, errors.New("minTimes and maxTimes differ in length")
   572  	}
   573  	response := &typesv1.GetProfileStatsResponse{
   574  		DataIngested:      len(minTimes) > 0,
   575  		OldestProfileTime: math.MaxInt64,
   576  		NewestProfileTime: math.MinInt64,
   577  	}
   578  
   579  	for i, minTime := range minTimes {
   580  		maxTime := maxTimes[i]
   581  		if response.OldestProfileTime > minTime.Time().UnixMilli() {
   582  			response.OldestProfileTime = minTime.Time().UnixMilli()
   583  		}
   584  		if response.NewestProfileTime < maxTime.Time().UnixMilli() {
   585  			response.NewestProfileTime = maxTime.Time().UnixMilli()
   586  		}
   587  	}
   588  	return response, nil
   589  }
   590  
   591  func (f *PhlareDB) GetBlockStats(ctx context.Context, req *connect.Request[ingestv1.GetBlockStatsRequest]) (*connect.Response[ingestv1.GetBlockStatsResponse], error) {
   592  	sp, _ := opentracing.StartSpanFromContext(ctx, "PhlareDB GetBlockStats")
   593  	defer sp.Finish()
   594  
   595  	res := &ingestv1.GetBlockStatsResponse{}
   596  	f.headLock.RLock()
   597  	for _, h := range f.heads {
   598  		if slices.Contains(req.Msg.GetUlids(), h.meta.ULID.String()) {
   599  			res.BlockStats = append(res.BlockStats, h.GetMetaStats().ConvertToBlockStats())
   600  		}
   601  	}
   602  	for _, h := range f.flushing {
   603  		if slices.Contains(req.Msg.GetUlids(), h.meta.ULID.String()) {
   604  			res.BlockStats = append(res.BlockStats, h.GetMetaStats().ConvertToBlockStats())
   605  		}
   606  	}
   607  	f.headLock.RUnlock()
   608  
   609  	f.blockQuerier.queriersLock.RLock()
   610  	for _, q := range f.blockQuerier.queriers {
   611  		if slices.Contains(req.Msg.GetUlids(), q.meta.ULID.String()) {
   612  			res.BlockStats = append(res.BlockStats, q.GetMetaStats().ConvertToBlockStats())
   613  		}
   614  	}
   615  	f.blockQuerier.queriersLock.RUnlock()
   616  
   617  	return connect.NewResponse(res), nil
   618  }