github.com/grafana/pyroscope@v1.18.0/pkg/phlaredb/head_test.go (about)

     1  package phlaredb
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"path/filepath"
     7  	"strconv"
     8  	"sync"
     9  	"testing"
    10  	"time"
    11  
    12  	"connectrpc.com/connect"
    13  	"github.com/google/uuid"
    14  	"github.com/oklog/ulid/v2"
    15  	"github.com/parquet-go/parquet-go"
    16  	"github.com/prometheus/client_golang/prometheus"
    17  	"github.com/prometheus/common/model"
    18  	"github.com/stretchr/testify/assert"
    19  	"github.com/stretchr/testify/require"
    20  
    21  	profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1"
    22  	ingestv1 "github.com/grafana/pyroscope/api/gen/proto/go/ingester/v1"
    23  	typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
    24  	"github.com/grafana/pyroscope/pkg/iter"
    25  	phlaremodel "github.com/grafana/pyroscope/pkg/model"
    26  	"github.com/grafana/pyroscope/pkg/objstore/providers/filesystem"
    27  	"github.com/grafana/pyroscope/pkg/phlaredb/block"
    28  	"github.com/grafana/pyroscope/pkg/pprof"
    29  	phlarecontext "github.com/grafana/pyroscope/pkg/pyroscope/context"
    30  )
    31  
// noLimit is a no-op ingestion limiter used by tests: it admits every
// profile and has nothing to stop.
type noLimit struct{}

// AllowProfile always admits the profile (never returns an error).
func (n noLimit) AllowProfile(fp model.Fingerprint, lbs phlaremodel.Labels, tsNano int64) error {
	return nil
}

// Stop is a no-op.
func (n noLimit) Stop() {}

// NoLimit is the shared no-op limiter instance passed to NewHead in tests.
var NoLimit = noLimit{}
    41  
    42  func newTestHead(t testing.TB) *testHead {
    43  	dataPath := t.TempDir()
    44  	ctx := testContext(t)
    45  	head, err := NewHead(ctx, Config{DataPath: dataPath}, NoLimit)
    46  	require.NoError(t, err)
    47  	return &testHead{Head: head, t: t, reg: phlarecontext.Registry(ctx).(*prometheus.Registry)}
    48  }
    49  
// testHead embeds a Head together with the owning test and the metrics
// registry created for it by newTestHead.
type testHead struct {
	*Head
	t   testing.TB
	reg *prometheus.Registry
}
    55  
// Flush delegates to Head.Flush and logs the flushed block's ULID afterwards.
// The closure defers the read of t.meta.ULID until after Flush has run, so
// any ULID update performed during the flush is reflected in the log line.
func (t *testHead) Flush(ctx context.Context) error {
	defer func() {
		t.t.Logf("flushing head of block %v", t.meta.ULID)
	}()
	return t.Head.Flush(ctx)
}
    62  
    63  func parseProfile(t testing.TB, path string) *profilev1.Profile {
    64  	p, err := pprof.OpenFile(path)
    65  	require.NoError(t, err, "failed opening profile: ", path)
    66  	return p.Profile
    67  }
    68  
// valueTypeStrings are the string-table entries the profile builders prepend
// after the mandatory empty string: index 1 = "unit", index 2 = "type",
// matching the indices used by newValueType.
var valueTypeStrings = []string{"unit", "type"}
    70  
    71  func newValueType() *profilev1.ValueType {
    72  	return &profilev1.ValueType{
    73  		Unit: 1,
    74  		Type: 2,
    75  	}
    76  }
    77  
    78  func newProfileFoo() *profilev1.Profile {
    79  	baseTable := append([]string{""}, valueTypeStrings...)
    80  	baseTableLen := int64(len(baseTable)) + 0
    81  	return &profilev1.Profile{
    82  		Function: []*profilev1.Function{
    83  			{
    84  				Id:   1,
    85  				Name: baseTableLen + 0,
    86  			},
    87  			{
    88  				Id:   2,
    89  				Name: baseTableLen + 1,
    90  			},
    91  		},
    92  		Location: []*profilev1.Location{
    93  			{
    94  				Id:        1,
    95  				MappingId: 1,
    96  				Address:   0x1337,
    97  			},
    98  			{
    99  				Id:        2,
   100  				MappingId: 1,
   101  				Address:   0x1338,
   102  			},
   103  		},
   104  		Mapping: []*profilev1.Mapping{
   105  			{Id: 1, Filename: baseTableLen + 2},
   106  		},
   107  		StringTable: append(baseTable, []string{
   108  			"func_a",
   109  			"func_b",
   110  			"my-foo-binary",
   111  		}...),
   112  		TimeNanos:  123456,
   113  		PeriodType: newValueType(),
   114  		SampleType: []*profilev1.ValueType{newValueType()},
   115  		Sample: []*profilev1.Sample{
   116  			{
   117  				Value:      []int64{0o123},
   118  				LocationId: []uint64{1},
   119  			},
   120  			{
   121  				Value:      []int64{1234},
   122  				LocationId: []uint64{1, 2},
   123  			},
   124  		},
   125  	}
   126  }
   127  
   128  func newProfileBar() *profilev1.Profile {
   129  	baseTable := append([]string{""}, valueTypeStrings...)
   130  	baseTableLen := int64(len(baseTable)) + 0
   131  	return &profilev1.Profile{
   132  		Function: []*profilev1.Function{
   133  			{
   134  				Id:   10,
   135  				Name: baseTableLen + 1,
   136  			},
   137  			{
   138  				Id:   21,
   139  				Name: baseTableLen + 0,
   140  			},
   141  		},
   142  		Location: []*profilev1.Location{
   143  			{
   144  				Id:        113,
   145  				MappingId: 1,
   146  				Address:   0x1337,
   147  				Line: []*profilev1.Line{
   148  					{FunctionId: 10, Line: 1},
   149  				},
   150  			},
   151  		},
   152  		Mapping: []*profilev1.Mapping{
   153  			{Id: 1, Filename: baseTableLen + 2},
   154  		},
   155  		StringTable: append(baseTable, []string{
   156  			"func_b",
   157  			"func_a",
   158  			"my-bar-binary",
   159  		}...),
   160  		TimeNanos:  123456,
   161  		PeriodType: newValueType(),
   162  		SampleType: []*profilev1.ValueType{newValueType()},
   163  		Sample: []*profilev1.Sample{
   164  			{
   165  				Value:      []int64{2345},
   166  				LocationId: []uint64{113},
   167  			},
   168  		},
   169  	}
   170  }
   171  
   172  func newProfileBaz() *profilev1.Profile {
   173  	return &profilev1.Profile{
   174  		Function: []*profilev1.Function{
   175  			{
   176  				Id:   25,
   177  				Name: 1,
   178  			},
   179  		},
   180  		StringTable: []string{
   181  			"",
   182  			"func_c",
   183  		},
   184  	}
   185  }
   186  
   187  func TestHeadLabelValues(t *testing.T) {
   188  	head := newTestHead(t)
   189  	require.NoError(t, head.Ingest(context.Background(), newProfileFoo(), uuid.New(), nil, &typesv1.LabelPair{Name: "job", Value: "foo"}, &typesv1.LabelPair{Name: "namespace", Value: "phlare"}))
   190  	require.NoError(t, head.Ingest(context.Background(), newProfileBar(), uuid.New(), nil, &typesv1.LabelPair{Name: "job", Value: "bar"}, &typesv1.LabelPair{Name: "namespace", Value: "phlare"}))
   191  
   192  	res, err := head.LabelValues(context.Background(), connect.NewRequest(&typesv1.LabelValuesRequest{Name: "cluster"}))
   193  	require.NoError(t, err)
   194  	require.Equal(t, []string{}, res.Msg.Names)
   195  
   196  	res, err = head.LabelValues(context.Background(), connect.NewRequest(&typesv1.LabelValuesRequest{Name: "job"}))
   197  	require.NoError(t, err)
   198  	require.Equal(t, []string{"bar", "foo"}, res.Msg.Names)
   199  }
   200  
   201  func TestHeadLabelNames(t *testing.T) {
   202  	head := newTestHead(t)
   203  	require.NoError(t, head.Ingest(context.Background(), newProfileFoo(), uuid.New(), nil, &typesv1.LabelPair{Name: "job", Value: "foo"}, &typesv1.LabelPair{Name: "namespace", Value: "phlare"}))
   204  	require.NoError(t, head.Ingest(context.Background(), newProfileBar(), uuid.New(), nil, &typesv1.LabelPair{Name: "job", Value: "bar"}, &typesv1.LabelPair{Name: "namespace", Value: "phlare"}))
   205  
   206  	res, err := head.LabelNames(context.Background(), connect.NewRequest(&typesv1.LabelNamesRequest{}))
   207  	require.NoError(t, err)
   208  	require.Equal(t, []string{"__period_type__", "__period_unit__", "__profile_type__", "__type__", "__unit__", "job", "namespace"}, res.Msg.Names)
   209  }
   210  
   211  func TestHeadSeries(t *testing.T) {
   212  	head := newTestHead(t)
   213  	fooLabels := phlaremodel.NewLabelsBuilder(nil).Set("namespace", "phlare").Set("job", "foo").Labels()
   214  	barLabels := phlaremodel.NewLabelsBuilder(nil).Set("namespace", "phlare").Set("job", "bar").Labels()
   215  	require.NoError(t, head.Ingest(context.Background(), newProfileFoo(), uuid.New(), nil, fooLabels...))
   216  	require.NoError(t, head.Ingest(context.Background(), newProfileBar(), uuid.New(), nil, barLabels...))
   217  
   218  	lblBuilder := phlaremodel.NewLabelsBuilder(nil).
   219  		Set("namespace", "phlare").
   220  		Set("job", "foo").
   221  		Set("__period_type__", "type").
   222  		Set("__period_unit__", "unit").
   223  		Set("__type__", "type").
   224  		Set("__unit__", "unit").
   225  		Set("__profile_type__", ":type:unit:type:unit")
   226  	expected := lblBuilder.Labels()
   227  	res, err := head.Series(context.Background(), connect.NewRequest(&ingestv1.SeriesRequest{Matchers: []string{`{job="foo"}`}}))
   228  	require.NoError(t, err)
   229  	require.Equal(t, []*typesv1.Labels{{Labels: expected}}, res.Msg.LabelsSet)
   230  
   231  	// Test we can filter labelNames
   232  	res, err = head.Series(context.Background(), connect.NewRequest(&ingestv1.SeriesRequest{LabelNames: []string{"job", "not-existing"}}))
   233  	require.NoError(t, err)
   234  	lblBuilder.Reset(nil)
   235  	jobFoo := lblBuilder.Set("job", "foo").Labels()
   236  	lblBuilder.Reset(nil)
   237  	jobBar := lblBuilder.Set("job", "bar").Labels()
   238  	require.Len(t, res.Msg.LabelsSet, 2)
   239  	require.Contains(t, res.Msg.LabelsSet, &typesv1.Labels{Labels: jobFoo})
   240  	require.Contains(t, res.Msg.LabelsSet, &typesv1.Labels{Labels: jobBar})
   241  }
   242  
   243  func TestHeadProfileTypes(t *testing.T) {
   244  	head := newTestHead(t)
   245  	require.NoError(t, head.Ingest(context.Background(), newProfileFoo(), uuid.New(), nil, &typesv1.LabelPair{Name: "__name__", Value: "foo"}, &typesv1.LabelPair{Name: "job", Value: "foo"}, &typesv1.LabelPair{Name: "namespace", Value: "phlare"}))
   246  	require.NoError(t, head.Ingest(context.Background(), newProfileBar(), uuid.New(), nil, &typesv1.LabelPair{Name: "__name__", Value: "bar"}, &typesv1.LabelPair{Name: "namespace", Value: "phlare"}))
   247  
   248  	res, err := head.ProfileTypes(context.Background(), connect.NewRequest(&ingestv1.ProfileTypesRequest{}))
   249  	require.NoError(t, err)
   250  	require.Equal(t, []*typesv1.ProfileType{
   251  		mustParseProfileSelector(t, "bar:type:unit:type:unit"),
   252  		mustParseProfileSelector(t, "foo:type:unit:type:unit"),
   253  	}, res.Msg.ProfileTypes)
   254  }
   255  
   256  func mustParseProfileSelector(t testing.TB, selector string) *typesv1.ProfileType {
   257  	ps, err := phlaremodel.ParseProfileTypeSelector(selector)
   258  	require.NoError(t, err)
   259  	return ps
   260  }
   261  
// TestHead_SelectMatchingProfiles_Order verifies that SelectMatchingProfiles
// returns profiles in ingestion order across both the on-disk and in-memory
// parts of the head, even when some profiles share a timestamp.
func TestHead_SelectMatchingProfiles_Order(t *testing.T) {
	ctx := testContext(t)
	const n = 15
	// Buffer capped below n so at least one flush to disk happens during
	// ingestion, splitting the data between the two parts.
	head, err := NewHead(ctx, Config{
		DataPath: t.TempDir(),
		Parquet: &ParquetConfig{
			MaxBufferRowCount: n - 1,
		},
	}, NoLimit)
	require.NoError(t, err)

	// Signal (exactly once) when the first profiles flush completes.
	c := make(chan struct{})
	var closeOnce sync.Once
	head.profiles.onFlush = func() {
		closeOnce.Do(func() {
			close(c)
		})
	}

	now := time.Now()
	for i := 0; i < n; i++ {
		x := newProfileFoo()
		// Make sure some of our profiles have matching timestamps.
		x.TimeNanos = now.Add(time.Second * time.Duration(i-i%2)).UnixNano()
		// The "x" label records the ingestion index for the order check below.
		require.NoError(t, head.Ingest(ctx, x, uuid.UUID{}, nil, []*typesv1.LabelPair{
			{Name: "job", Value: "foo"},
			{Name: "x", Value: strconv.Itoa(i)},
		}...))
	}

	// Wait for the flush before querying.
	<-c
	q := head.Queriers()
	assert.Equal(t, 2, len(q)) // on-disk and in-memory parts.

	typ, err := phlaremodel.ParseProfileTypeSelector(":type:unit:type:unit")
	require.NoError(t, err)
	req := &ingestv1.SelectProfilesRequest{
		LabelSelector: "{}",
		Type:          typ,
		End:           now.Add(time.Hour).UnixMilli(),
	}

	// Collect profiles from each part in turn; concatenation must preserve
	// overall ingestion order.
	profiles := make([]Profile, 0, n)
	for _, b := range q {
		i, err := b.SelectMatchingProfiles(ctx, req)
		require.NoError(t, err)
		s, err := iter.Slice(i)
		require.NoError(t, err)
		profiles = append(profiles, s...)
	}

	assert.Equal(t, n, len(profiles))
	for i, p := range profiles {
		x, err := strconv.Atoi(p.Labels().Get("x"))
		require.NoError(t, err)
		require.Equal(t, i, x, "SelectMatchingProfiles order mismatch")
	}
}
   320  
// TestHeadFlush ingests a fixed set of testdata profiles, flushes and moves
// the head, then compares the resulting block metadata (stats, file list,
// row counts, compaction info) against golden values. Parquet file sizes
// are zeroed before comparison because encoding is not byte-deterministic.
func TestHeadFlush(t *testing.T) {
	profilePaths := []string{
		"testdata/heap",
		"testdata/profile",
		"testdata/profile_uncompressed",
		"testdata/profile_python",
		"testdata/profile_java",
	}

	head := newTestHead(t)
	ctx := context.Background()

	for pos := range profilePaths {
		profile := parseProfile(t, profilePaths[pos])
		require.NoError(t, head.Ingest(ctx, profile, uuid.New(), nil))
	}

	require.NoError(t, head.Flush(ctx))
	require.NoError(t, head.Move())

	// Read the flushed block back through a filesystem bucket rooted at the
	// parent of the head's local path.
	b, err := filesystem.NewBucket(filepath.Dir(head.localPath))
	require.NoError(t, err)
	q := NewBlockQuerier(ctx, b)
	metas, err := q.BlockMetas(ctx)
	require.NoError(t, err)

	// Golden metadata for the single expected block. Counts (samples,
	// series, profiles, rows) are exact for the fixed testdata inputs.
	expectedMeta := []*block.Meta{
		{
			ULID:    head.meta.ULID,
			MinTime: head.meta.MinTime,
			MaxTime: head.meta.MaxTime,
			Stats: block.BlockStats{
				NumSamples:  14192,
				NumSeries:   8,
				NumProfiles: 11,
			},
			Labels: map[string]string{},
			Files: []block.File{
				{
					RelPath:   "index.tsdb",
					SizeBytes: 2484,
					TSDB: &block.TSDBFile{
						NumSeries: 8,
					},
				},
				{
					RelPath: "profiles.parquet",
					Parquet: &block.ParquetFile{
						NumRowGroups: 1,
						NumRows:      11,
					},
				},
				{
					RelPath: "symbols/functions.parquet",
					Parquet: &block.ParquetFile{
						NumRowGroups: 2,
						NumRows:      1423,
					},
				},
				{
					RelPath:   "symbols/index.symdb",
					SizeBytes: 308,
				},
				{
					RelPath: "symbols/locations.parquet",
					Parquet: &block.ParquetFile{
						NumRowGroups: 2,
						NumRows:      2469,
					},
				},
				{
					RelPath: "symbols/mappings.parquet",
					Parquet: &block.ParquetFile{
						NumRowGroups: 2,
						NumRows:      3,
					},
				},
				{
					RelPath:   "symbols/stacktraces.symdb",
					SizeBytes: 60366,
				},
				{
					RelPath: "symbols/strings.parquet",
					Parquet: &block.ParquetFile{
						NumRowGroups: 2,
						NumRows:      1722,
					},
				},
			},
			Compaction: block.BlockMetaCompaction{
				Level: 1,
				Sources: []ulid.ULID{
					head.meta.ULID,
				},
			},
			Version: 3,
		},
	}

	// Parquet files are not deterministic, their size can change for the same input so we don't check them.
	for i := range metas {
		for j := range metas[i].Files {
			if metas[i].Files[j].Parquet != nil && metas[i].Files[j].Parquet.NumRows != 0 {
				require.NotEmpty(t, metas[i].Files[j].SizeBytes)
				metas[i].Files[j].SizeBytes = 0
			}
		}
	}

	require.Equal(t, expectedMeta, metas)
}
   432  
   433  // TestHead_Concurrent_Ingest_Querying tests that the head can handle concurrent reads and writes.
   434  func TestHead_Concurrent_Ingest_Querying(t *testing.T) {
   435  	var (
   436  		ctx = testContext(t)
   437  		cfg = Config{
   438  			DataPath: t.TempDir(),
   439  		}
   440  		head, err = NewHead(ctx, cfg, NoLimit)
   441  	)
   442  	require.NoError(t, err)
   443  
   444  	// force different row group segements for profiles
   445  	head.profiles.cfg = &ParquetConfig{MaxRowGroupBytes: 128000, MaxBufferRowCount: 10}
   446  
   447  	wg := sync.WaitGroup{}
   448  
   449  	profilesPerSeries := 33
   450  
   451  	for i := 0; i < 3; i++ {
   452  		wg.Add(1)
   453  		// ingester
   454  		go func(i int) {
   455  			defer wg.Done()
   456  			tick := time.NewTicker(time.Millisecond)
   457  			defer tick.Stop()
   458  			for j := 0; j < profilesPerSeries; j++ {
   459  				<-tick.C
   460  				require.NoError(t, ingestThreeProfileStreams(ctx, profilesPerSeries*i+j, head.Ingest))
   461  			}
   462  			t.Logf("ingest stream %s done", streams[i])
   463  		}(i)
   464  
   465  		// querier
   466  		wg.Add(1)
   467  		go func(i int) {
   468  			defer wg.Done()
   469  
   470  			tick := time.NewTicker(time.Millisecond)
   471  			defer tick.Stop()
   472  
   473  			tsToBeSeen := make(map[int64]struct{}, profilesPerSeries)
   474  			for j := 0; j < profilesPerSeries; j++ {
   475  				tsToBeSeen[int64(j*3+i)] = struct{}{}
   476  			}
   477  
   478  			for j := 0; j < 50; j++ {
   479  				<-tick.C
   480  				// now query the store
   481  				params := &ingestv1.SelectProfilesRequest{
   482  					Start:         0,
   483  					End:           1000000000000,
   484  					LabelSelector: fmt.Sprintf(`{stream="%s"}`, streams[i]),
   485  					Type:          mustParseProfileSelector(t, "process_cpu:cpu:nanoseconds:cpu:nanoseconds"),
   486  				}
   487  
   488  				queriers := head.Queriers()
   489  
   490  				pIt, err := queriers.SelectMatchingProfiles(ctx, params)
   491  				require.NoError(t, err)
   492  
   493  				for pIt.Next() {
   494  					ts := pIt.At().Timestamp().Unix()
   495  					if (ts % 3) != int64(i) {
   496  						panic("unexpected timestamp")
   497  					}
   498  					delete(tsToBeSeen, ts)
   499  				}
   500  
   501  				// finish once we have all the profiles
   502  				if len(tsToBeSeen) == 0 {
   503  					break
   504  				}
   505  			}
   506  			t.Logf("read stream %s done", streams[i])
   507  		}(i)
   508  
   509  	}
   510  
   511  	// TODO: We need to test if flushing misses out on ingested profiles
   512  
   513  	wg.Wait()
   514  }
   515  
// TestIsStale exercises the head staleness rule: a head is stale only when
// both the stale grace period has elapsed since the last update and the
// head's max time (first argument) has already been passed.
func TestIsStale(t *testing.T) {
	head := newTestHead(t)
	now := time.Unix(0, time.Minute.Nanoseconds())

	// should not be stale if have not past the stale grace period
	head.updatedAt.Store(time.Unix(0, 0))
	require.False(t, head.isStale(now.UnixNano(), now))
	// should be stale as we have passed the stale grace period
	require.True(t, head.isStale(now.UnixNano(), now.Add(2*StaleGracePeriod)))
	// Should not be stale if maxT is not passed.
	require.False(t, head.isStale(now.Add(2*StaleGracePeriod).UnixNano(), now.Add(2*StaleGracePeriod)))
}
   528  
   529  func profileWithID(id int) (*profilev1.Profile, uuid.UUID) {
   530  	p := newProfileFoo()
   531  	p.TimeNanos = int64(id)
   532  	return p, uuid.MustParse(fmt.Sprintf("00000000-0000-0000-0000-%012d", id))
   533  }
   534  
   535  func TestHead_ProfileOrder(t *testing.T) {
   536  	head := newTestHead(t)
   537  
   538  	p, u := profileWithID(1)
   539  	require.NoError(t, head.Ingest(
   540  		context.Background(),
   541  		p,
   542  		u,
   543  		nil,
   544  		&typesv1.LabelPair{Name: phlaremodel.LabelNameProfileName, Value: "memory"},
   545  		&typesv1.LabelPair{Name: phlaremodel.LabelNameOrder, Value: phlaremodel.LabelOrderEnforced},
   546  		&typesv1.LabelPair{Name: phlaremodel.LabelNameServiceName, Value: "service-a"},
   547  	))
   548  
   549  	p, u = profileWithID(2)
   550  	require.NoError(t, head.Ingest(
   551  		context.Background(),
   552  		p,
   553  		u,
   554  		nil,
   555  		&typesv1.LabelPair{Name: phlaremodel.LabelNameProfileName, Value: "memory"},
   556  		&typesv1.LabelPair{Name: phlaremodel.LabelNameOrder, Value: phlaremodel.LabelOrderEnforced},
   557  		&typesv1.LabelPair{Name: phlaremodel.LabelNameServiceName, Value: "service-b"},
   558  		&typesv1.LabelPair{Name: "____Label", Value: "important"},
   559  	))
   560  
   561  	p, u = profileWithID(3)
   562  	require.NoError(t, head.Ingest(
   563  		context.Background(),
   564  		p,
   565  		u,
   566  		nil,
   567  		&typesv1.LabelPair{Name: phlaremodel.LabelNameProfileName, Value: "memory"},
   568  		&typesv1.LabelPair{Name: phlaremodel.LabelNameOrder, Value: phlaremodel.LabelOrderEnforced},
   569  		&typesv1.LabelPair{Name: phlaremodel.LabelNameServiceName, Value: "service-c"},
   570  		&typesv1.LabelPair{Name: "AAALabel", Value: "important"},
   571  	))
   572  
   573  	p, u = profileWithID(4)
   574  	require.NoError(t, head.Ingest(
   575  		context.Background(),
   576  		p,
   577  		u,
   578  		nil,
   579  		&typesv1.LabelPair{Name: phlaremodel.LabelNameProfileName, Value: "cpu"},
   580  		&typesv1.LabelPair{Name: phlaremodel.LabelNameOrder, Value: phlaremodel.LabelOrderEnforced},
   581  		&typesv1.LabelPair{Name: phlaremodel.LabelNameServiceName, Value: "service-a"},
   582  		&typesv1.LabelPair{Name: "000Label", Value: "important"},
   583  	))
   584  
   585  	p, u = profileWithID(5)
   586  	require.NoError(t, head.Ingest(
   587  		context.Background(),
   588  		p,
   589  		u,
   590  		nil,
   591  		&typesv1.LabelPair{Name: phlaremodel.LabelNameProfileName, Value: "cpu"},
   592  		&typesv1.LabelPair{Name: phlaremodel.LabelNameOrder, Value: phlaremodel.LabelOrderEnforced},
   593  		&typesv1.LabelPair{Name: phlaremodel.LabelNameServiceName, Value: "service-b"},
   594  	))
   595  
   596  	p, u = profileWithID(6)
   597  	require.NoError(t, head.Ingest(
   598  		context.Background(),
   599  		p,
   600  		u,
   601  		nil,
   602  		&typesv1.LabelPair{Name: phlaremodel.LabelNameProfileName, Value: "cpu"},
   603  		&typesv1.LabelPair{Name: phlaremodel.LabelNameOrder, Value: phlaremodel.LabelOrderEnforced},
   604  		&typesv1.LabelPair{Name: phlaremodel.LabelNameServiceName, Value: "service-b"},
   605  	))
   606  
   607  	head.Flush(context.Background())
   608  
   609  	// test that the profiles are ordered correctly
   610  	type row struct{ TimeNanos uint64 }
   611  	rows, err := parquet.ReadFile[row](filepath.Join(head.headPath, "profiles.parquet"))
   612  	require.NoError(t, err)
   613  	require.Equal(t, []row{
   614  		{4}, {5}, {6}, {1}, {2}, {3},
   615  	}, rows)
   616  }
   617  
   618  func BenchmarkHeadIngestProfiles(t *testing.B) {
   619  	var (
   620  		profilePaths = []string{
   621  			"testdata/heap",
   622  			"testdata/profile",
   623  		}
   624  		profileCount = 0
   625  	)
   626  
   627  	head := newTestHead(t)
   628  	ctx := context.Background()
   629  
   630  	t.ReportAllocs()
   631  
   632  	for n := 0; n < t.N; n++ {
   633  		for pos := range profilePaths {
   634  			p := parseProfile(t, profilePaths[pos])
   635  			require.NoError(t, head.Ingest(ctx, p, uuid.New(), nil))
   636  			profileCount++
   637  		}
   638  	}
   639  }