github.com/grafana/pyroscope@v1.18.0/pkg/phlaredb/profile_store_test.go (about)

     1  package phlaredb
     2  
     3  import (
     4  	"context"
     5  	"crypto/md5"
     6  	"fmt"
     7  	"io"
     8  	"os"
     9  	"strings"
    10  	"testing"
    11  	"time"
    12  
    13  	"github.com/go-kit/log"
    14  	"github.com/google/pprof/profile"
    15  	"github.com/google/uuid"
    16  	"github.com/parquet-go/parquet-go"
    17  	"github.com/prometheus/client_golang/prometheus"
    18  	"github.com/prometheus/common/model"
    19  	"github.com/samber/lo"
    20  	"github.com/stretchr/testify/assert"
    21  	"github.com/stretchr/testify/require"
    22  
    23  	profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1"
    24  	ingestv1 "github.com/grafana/pyroscope/api/gen/proto/go/ingester/v1"
    25  	typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
    26  	phlaremodel "github.com/grafana/pyroscope/pkg/model"
    27  	phlareobj "github.com/grafana/pyroscope/pkg/objstore"
    28  	phlareobjclient "github.com/grafana/pyroscope/pkg/objstore/client"
    29  	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
    30  	"github.com/grafana/pyroscope/pkg/pprof/testhelper"
    31  	phlarecontext "github.com/grafana/pyroscope/pkg/pyroscope/context"
    32  )
    33  
const (
	// contextKeyDataDir keys the test data directory stored in a context.
	// Offset by 32 to avoid colliding with other contextKey values declared
	// elsewhere in the package.
	contextKeyDataDir contextKey = iota + 32
)
    37  
// contextWithDataDir returns a child context carrying the test data
// directory path, retrievable via contextDataDir.
func contextWithDataDir(ctx context.Context, path string) context.Context {
	return context.WithValue(ctx, contextKeyDataDir, path)
}
    41  
// contextDataDir extracts the data directory previously stored with
// contextWithDataDir. It panics if the context does not carry one, which is
// acceptable in tests as it surfaces setup mistakes immediately.
func contextDataDir(ctx context.Context) string {
	return ctx.Value(contextKeyDataDir).(string)
}
    45  
// testCtx bundles a pre-populated test context with the on-disk data
// directory it uses and a filesystem-backed bucket client rooted at it.
type testCtx struct {
	context.Context
	dataDir           string            // temp directory backing the bucket
	localBucketClient phlareobj.Bucket  // filesystem bucket rooted at dataDir
}
    51  
// testContext builds a context for tests carrying a logger (logfmt to stderr
// when -v is set, nop otherwise), a pedantic Prometheus registry, head
// metrics, and a fresh temp data directory exposed both via the context and
// as a filesystem bucket client. Cancellation is registered with t.Cleanup.
func testContext(t testing.TB) testCtx {
	logger := log.NewNopLogger()
	if testing.Verbose() {
		logger = log.NewLogfmtLogger(os.Stderr)
	}

	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	ctx = phlarecontext.WithLogger(ctx, logger)

	// pedantic registry: duplicate metric registrations fail the test
	reg := prometheus.NewPedanticRegistry()
	ctx = phlarecontext.WithRegistry(ctx, reg)

	dataPath := t.TempDir()
	ctx = contextWithDataDir(ctx, dataPath)
	bucketCfg := phlareobjclient.Config{}
	bucketCfg.Backend = phlareobjclient.Filesystem
	bucketCfg.Filesystem.Directory = dataPath
	bucketClient, err := phlareobjclient.NewBucket(ctx, bucketCfg, "testing")
	require.NoError(t, err)

	ctx = contextWithHeadMetrics(ctx, newHeadMetrics(reg))
	return testCtx{
		Context:           ctx,
		dataDir:           dataPath,
		localBucketClient: bucketClient,
	}
}
    81  
// testProfile is an in-memory profile together with the metadata needed to
// ingest it into a profile store.
type testProfile struct {
	p           schemav1.InMemoryProfile // the profile row to ingest
	profileName string                   // profile type name (metric name)
	lbls        phlaremodel.Labels       // series labels, without the metric name
}
    87  
    88  func (tp *testProfile) populateFingerprint() {
    89  	lbls := phlaremodel.NewLabelsBuilder(tp.lbls)
    90  	lbls.Set(model.MetricNameLabel, tp.profileName)
    91  	tp.p.SeriesFingerprint = model.Fingerprint(lbls.Labels().Hash())
    92  }
    93  
    94  func sameProfileStream(i int) *testProfile {
    95  	tp := &testProfile{}
    96  
    97  	tp.profileName = "process_cpu:cpu:nanoseconds:cpu:nanoseconds"
    98  	tp.lbls = phlaremodel.LabelsFromStrings(
    99  		phlaremodel.LabelNameProfileType, tp.profileName,
   100  		"job", "test",
   101  	)
   102  
   103  	tp.p.ID = uuid.MustParse(fmt.Sprintf("00000000-0000-0000-0000-%012d", i))
   104  	tp.p.TimeNanos = time.Second.Nanoseconds() * int64(i)
   105  
   106  	tp.p.Samples = schemav1.Samples{
   107  		StacktraceIDs: []uint32{0x1},
   108  		Values:        []uint64{10},
   109  	}
   110  	tp.populateFingerprint()
   111  
   112  	return tp
   113  }
   114  
   115  // This will simulate a profile stream which ends and a new one starts at i > boundary
   116  func profileStreamEndingAndStarting(boundary int) func(int) *testProfile {
   117  	return func(i int) *testProfile {
   118  		tp := &testProfile{}
   119  
   120  		series := "at-beginning"
   121  		if i > boundary {
   122  			series = "at-end"
   123  		}
   124  
   125  		tp.profileName = "process_cpu:cpu:nanoseconds:cpu:nanoseconds"
   126  		tp.lbls = phlaremodel.LabelsFromStrings(
   127  			phlaremodel.LabelNameProfileType, tp.profileName,
   128  			"job", "test",
   129  			"stream", series,
   130  		)
   131  
   132  		tp.p.ID = uuid.MustParse(fmt.Sprintf("00000000-0000-0000-0000-%012d", i))
   133  		tp.p.TimeNanos = time.Second.Nanoseconds() * int64(i)
   134  		tp.p.Samples = schemav1.Samples{
   135  			StacktraceIDs: []uint32{0x1},
   136  			Values:        []uint64{10},
   137  		}
   138  		tp.populateFingerprint()
   139  		return tp
   140  	}
   141  }
   142  
   143  func nProfileStreams(n int) func(int) *testProfile {
   144  	return func(i int) *testProfile {
   145  		tp := sameProfileStream(i / n)
   146  
   147  		tp.lbls = phlaremodel.LabelsFromStrings(
   148  			phlaremodel.LabelNameProfileType, tp.profileName,
   149  			"job", "test",
   150  			"stream", fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d", i%n)))),
   151  		)
   152  		tp.p.ID = uuid.MustParse(fmt.Sprintf("00000000-0000-0000-0000-%012d", i))
   153  
   154  		tp.populateFingerprint()
   155  		return tp
   156  	}
   157  }
   158  
   159  func readFullParquetFile[M any](t *testing.T, path string) ([]M, uint64) {
   160  	f, err := os.Open(path)
   161  	require.NoError(t, err)
   162  	defer func() {
   163  		require.NoError(t, f.Close())
   164  	}()
   165  	stat, err := f.Stat()
   166  	require.NoError(t, err)
   167  
   168  	pf, err := parquet.OpenFile(f, stat.Size())
   169  	require.NoError(t, err)
   170  	numRGs := uint64(len(pf.RowGroups()))
   171  
   172  	reader := parquet.NewGenericReader[M](f)
   173  
   174  	slice := make([]M, reader.NumRows())
   175  	offset := 0
   176  	for {
   177  		n, err := reader.Read(slice[offset:])
   178  		if err == io.EOF {
   179  			break
   180  		}
   181  		require.NoError(t, err)
   182  		offset += n
   183  	}
   184  
   185  	return slice, numRGs
   186  }
   187  
// TestProfileStore_RowGroupSplitting tests that the profile store splits row
// groups when certain limits are reached. It also checks that on flushing the
// block is aggregated correctly. All ingestion is done using the same profile series.
func TestProfileStore_RowGroupSplitting(t *testing.T) {
	var (
		ctx   = testContext(t)
		store = newProfileStore(ctx)
	)

	for _, tc := range []struct {
		name            string
		cfg             *ParquetConfig
		expectedNumRows uint64
		expectedNumRGs  uint64
		values          func(int) *testProfile // profile generator, invoked with 0..99
	}{
		{
			name:            "single row group",
			cfg:             defaultParquetConfig,
			expectedNumRGs:  1,
			expectedNumRows: 100,
			values:          sameProfileStream,
		},
		{
			name:            "a stream ending after half of the samples and a new one starting",
			cfg:             &ParquetConfig{MaxRowGroupBytes: 128000, MaxBufferRowCount: 10},
			expectedNumRGs:  10,
			expectedNumRows: 100,
			values:          profileStreamEndingAndStarting(50),
		},
		{
			name:            "multiple row groups because of maximum row num",
			cfg:             &ParquetConfig{MaxRowGroupBytes: 128000, MaxBufferRowCount: 10},
			expectedNumRGs:  10,
			expectedNumRows: 100,
			values:          sameProfileStream,
		},
		{
			name:            "a single sample per series",
			cfg:             &ParquetConfig{MaxRowGroupBytes: 128000, MaxBufferRowCount: 10},
			expectedNumRGs:  10,
			expectedNumRows: 100,
			values:          nProfileStreams(100),
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			path := t.TempDir()
			require.NoError(t, store.Init(path, tc.cfg, newHeadMetrics(prometheus.NewRegistry())))

			for i := 0; i < 100; i++ {
				p := tc.values(i)
				require.NoError(t, store.ingest(ctx, []schemav1.InMemoryProfile{p.p}, p.lbls, p.profileName))
				// wait until any asynchronous row-group flush triggered by the
				// ingest has settled before ingesting the next profile
				for store.flushing.Load() {
					time.Sleep(time.Millisecond)
				}
			}

			// ensure the correct number of files are created
			numRows, numRGs, err := store.Flush(context.Background())
			require.NoError(t, err)
			require.NoError(t, store.DeleteRowGroups())
			assert.Equal(t, tc.expectedNumRows, numRows)
			assert.Equal(t, tc.expectedNumRGs, numRGs)

			// list folder to ensure only the aggregated block exists
			files, err := os.ReadDir(path)
			require.NoError(t, err)
			require.Equal(t, []string{"index.tsdb", "profiles.parquet"}, lo.Map(files, func(e os.DirEntry, _ int) string {
				return e.Name()
			}))

			rows, numRGs := readFullParquetFile[*schemav1.Profile](t, path+"/profiles.parquet")
			require.Equal(t, int(tc.expectedNumRows), len(rows))
			assert.Equal(t, tc.expectedNumRGs, numRGs)

			// ensure all profiles are there, exactly once each
			idExisting := make(map[uuid.UUID]int, tc.expectedNumRows)
			for i := range rows {
				_, ok := idExisting[rows[i].ID]
				assert.False(t, ok, "expected ID to not exists more than once")
				idExisting[rows[i].ID] = i
			}
			for i := 0; i < int(tc.expectedNumRows); i++ {
				id := uuid.MustParse(fmt.Sprintf("00000000-0000-0000-0000-%012d", i))
				_, ok := idExisting[id]
				assert.True(t, ok, fmt.Sprintf("expected ID %s to exist in output", id.String()))
			}
		})
	}
}
   278  
// streams are the three label values used to fan profiles out over three
// distinct series.
var streams = []string{"stream-a", "stream-b", "stream-c"}

// threeProfileStreams returns sameProfileStream(i) assigned round-robin to
// one of the three streams via the "stream" label, with the series
// fingerprint recomputed for the new label set.
func threeProfileStreams(i int) *testProfile {
	tp := sameProfileStream(i)

	lbls := phlaremodel.NewLabelsBuilder(tp.lbls)
	lbls.Set("stream", streams[i%3])
	tp.lbls = lbls.Labels()
	tp.populateFingerprint()
	return tp
}
   290  
// TestProfileStore_Ingestion_SeriesIndexes during ingestion, the profile store
// writes out row groups to disk temporarily. Later when finishing up the block
// it will have to combine those files on disk and update the seriesIndex,
// which is only known when the TSDB index is written to disk.
func TestProfileStore_Ingestion_SeriesIndexes(t *testing.T) {
	var (
		ctx   = testContext(t)
		store = newProfileStore(ctx)
	)
	path := t.TempDir()
	require.NoError(t, store.Init(path, defaultParquetConfig, newHeadMetrics(prometheus.NewRegistry())))

	// ingest 9 profiles spread round-robin over 3 series
	for i := 0; i < 9; i++ {
		p := threeProfileStreams(i)
		require.NoError(t, store.ingest(ctx, []schemav1.InMemoryProfile{p.p}, p.lbls, p.profileName))
	}

	// flush profiles and ensure the correct number of files are created
	numRows, numRGs, err := store.Flush(context.Background())
	require.NoError(t, err)
	assert.Equal(t, uint64(9), numRows)
	assert.Equal(t, uint64(1), numRGs)

	// now compare the written parquet files
	rows, numRGs := readFullParquetFile[*schemav1.Profile](t, path+"/profiles.parquet")
	require.Equal(t, 9, len(rows))
	assert.Equal(t, uint64(1), numRGs)
	// expected in series ID order and then by timeNanos
	for i := 0; i < 9; i++ {
		id := i%3*3 + i/3 // generates 0,3,6,1,4,7,2,5,8
		assert.Equal(t, fmt.Sprintf("00000000-0000-0000-0000-%012d", id), rows[i].ID.String())
		assert.Equal(t, uint32(i/3), rows[i].SeriesIndex)
	}
}
   325  
   326  func BenchmarkFlush(b *testing.B) {
   327  	b.StopTimer()
   328  	ctx := testContext(b)
   329  	metrics := newHeadMetrics(prometheus.NewRegistry())
   330  	b.ReportAllocs()
   331  	samples := schemav1.Samples{
   332  		Values:        make([]uint64, 10000),
   333  		StacktraceIDs: make([]uint32, 10000),
   334  	}
   335  	for i := 0; i < 10000; i++ {
   336  		samples.Values[i] = uint64(i)
   337  		samples.StacktraceIDs[i] = uint32(i)
   338  	}
   339  	for i := 0; i < b.N; i++ {
   340  
   341  		path := b.TempDir()
   342  		store := newProfileStore(ctx)
   343  		require.NoError(b, store.Init(path, defaultParquetConfig, metrics))
   344  		for rg := 0; rg < 10; rg++ {
   345  			for i := 0; i < 10^6; i++ {
   346  				p := threeProfileStreams(i)
   347  				p.p.Samples = samples
   348  				require.NoError(b, store.ingest(ctx, []schemav1.InMemoryProfile{p.p}, p.lbls, p.profileName))
   349  			}
   350  			require.NoError(b, store.cutRowGroup(len(store.slice)))
   351  		}
   352  		b.StartTimer()
   353  		_, _, err := store.Flush(context.Background())
   354  		require.NoError(b, err)
   355  		b.StopTimer()
   356  	}
   357  }
   358  
// ingestThreeProfileStreams builds a CPU pprof profile timestamped at second
// i, labels it round-robin with one of the three streams, and hands it to the
// given ingest callback. Each profile carries two stacktraces:
// func1>func2 with 10 samples and func1 with 20 samples.
func ingestThreeProfileStreams(ctx context.Context, i int, ingest func(context.Context, *profilev1.Profile, uuid.UUID, []*typesv1.ProfileAnnotation, ...*typesv1.LabelPair) error) error {
	p := testhelper.NewProfileBuilder(time.Second.Nanoseconds() * int64(i))
	p.CPUProfile()
	p.WithLabels(
		"job", "foo",
		"stream", streams[i%3],
	)
	// deterministic UUID derived from i, matching the other test generators
	p.UUID = uuid.MustParse(fmt.Sprintf("00000000-0000-0000-0000-%012d", i))
	p.ForStacktraceString("func1", "func2").AddSamples(10)
	p.ForStacktraceString("func1").AddSamples(20)

	return ingest(ctx, p.Profile, p.UUID, nil, p.Labels...)
}
   372  
// TestProfileStore_Querying ingests nine profiles spread over three streams
// into a head with small row-group limits (forcing several on-disk segments)
// and then exercises the three query paths: profile selection, merge by
// labels, merge by stacktraces, and merge by pprof.
func TestProfileStore_Querying(t *testing.T) {
	var (
		ctx = testContext(t)
		cfg = Config{
			DataPath: t.TempDir(),
		}
		head, err = NewHead(ctx, cfg, NoLimit)
	)
	require.NoError(t, err)

	// force different row group segements for profiles
	head.profiles.cfg = &ParquetConfig{MaxRowGroupBytes: 128000, MaxBufferRowCount: 3}

	for i := 0; i < 9; i++ {
		require.NoError(t, ingestThreeProfileStreams(ctx, i, func(ctx context.Context, p *profilev1.Profile, u uuid.UUID, a []*typesv1.ProfileAnnotation, lp ...*typesv1.LabelPair) error {
			defer func() {
				// wait for the profile to be flushed
				// todo(cyriltovena): We shouldn't need this, but when calling head.Queriers(), flushing row group and then querying using the queriers previously returned we will miss the new headDiskQuerier.
				for head.profiles.flushing.Load() {
					time.Sleep(time.Millisecond)
				}
			}()
			return head.Ingest(ctx, p, u, a, lp...)
		}))
	}

	// now query the store; the range covers all nine ingested timestamps
	params := &ingestv1.SelectProfilesRequest{
		Start:         0,
		End:           1000000000000,
		LabelSelector: "{}",
		Type:          mustParseProfileSelector(t, "process_cpu:cpu:nanoseconds:cpu:nanoseconds"),
	}

	t.Run("select matching profiles", func(t *testing.T) {
		pIt, err := head.Queriers().SelectMatchingProfiles(ctx, params)
		require.NoError(t, err)

		// ensure we see the profiles we expect
		var profileTS []int64
		for pIt.Next() {
			profileTS = append(profileTS, pIt.At().Timestamp().Unix())
		}
		assert.Equal(t, []int64{0, 1, 2, 3, 4, 5, 6, 7, 8}, profileTS)
	})

	t.Run("merge by labels", func(t *testing.T) {
		client, cleanup := head.Queriers().ingesterClient()
		defer cleanup()

		bidi := client.MergeProfilesLabels(ctx)

		// first message carries the selection request and the grouping labels
		require.NoError(t, bidi.Send(&ingestv1.MergeProfilesLabelsRequest{
			Request: &ingestv1.SelectProfilesRequest{
				LabelSelector: params.LabelSelector,
				Type:          params.Type,
				Start:         params.Start,
				End:           params.End,
			},
			By: []string{"stream"},
		}))

		// bidi protocol: for each batch of selected profiles, reply with a
		// boolean mask of which ones to keep (here: all of them)
		for {
			resp, err := bidi.Receive()
			require.NoError(t, err)

			// when empty, finished reading profiles
			if resp.SelectedProfiles == nil {
				break
			}

			selectProfiles := make([]bool, len(resp.SelectedProfiles.Profiles))
			for pos := range resp.SelectedProfiles.Profiles {
				selectProfiles[pos] = true
			}

			require.NoError(t, bidi.Send(&ingestv1.MergeProfilesLabelsRequest{
				Profiles: selectProfiles,
			}))
		}

		// still receiving a result
		result, err := bidi.Receive()
		require.NoError(t, err)

		streams := []string{}
		timestamps := []int64{}
		values := []float64{}
		for _, x := range result.Series {
			streams = append(streams, phlaremodel.LabelPairsString(x.Labels))
			for _, p := range x.Points {
				timestamps = append(timestamps, p.Timestamp)
				values = append(values, p.Value)
			}
		}
		assert.Equal(
			t,
			[]string{`{stream="stream-a"}`, `{stream="stream-b"}`, `{stream="stream-c"}`},
			streams,
		)
		// timestamps are grouped per stream (a: 0,3,6s; b: 1,4,7s; c: 2,5,8s), in ms
		assert.Equal(
			t,
			[]int64{0, 3000, 6000, 1000, 4000, 7000, 2000, 5000, 8000},
			timestamps,
		)
		// each profile contributes 10+20 samples
		assert.Equal(
			t,
			[]float64{30, 30, 30, 30, 30, 30, 30, 30, 30},
			values,
		)
	})

	t.Run("merge by stacktraces", func(t *testing.T) {
		client, cleanup := head.Queriers().ingesterClient()
		defer cleanup()

		bidi := client.MergeProfilesStacktraces(ctx)

		require.NoError(t, bidi.Send(&ingestv1.MergeProfilesStacktracesRequest{
			Request: &ingestv1.SelectProfilesRequest{
				LabelSelector: params.LabelSelector,
				Type:          params.Type,
				Start:         params.Start,
				End:           params.End,
			},
		}))

		// select every profile offered by the server
		for {
			resp, err := bidi.Receive()
			require.NoError(t, err)

			// when empty, finished reading profiles
			if resp.SelectedProfiles == nil {
				break
			}

			selectProfiles := make([]bool, len(resp.SelectedProfiles.Profiles))
			for pos := range resp.SelectedProfiles.Profiles {
				selectProfiles[pos] = true
			}

			require.NoError(t, bidi.Send(&ingestv1.MergeProfilesStacktracesRequest{
				Profiles: selectProfiles,
			}))
		}

		// still receiving a result
		result, err := bidi.Receive()
		require.NoError(t, err)

		at, err := phlaremodel.UnmarshalTree(result.Result.TreeBytes)
		require.NoError(t, err)

		// 9 profiles x 10 samples = 90 for func1>func2, x 20 = 180 for func1
		et := new(phlaremodel.Tree)
		et.InsertStack(90, "func2", "func1")
		et.InsertStack(180, "func1")

		assert.Equal(t, et.String(), at.String())
	})

	t.Run("merge by pprof", func(t *testing.T) {
		client, cleanup := head.Queriers().ingesterClient()
		defer cleanup()

		bidi := client.MergeProfilesPprof(ctx)

		require.NoError(t, bidi.Send(&ingestv1.MergeProfilesPprofRequest{
			Request: &ingestv1.SelectProfilesRequest{
				LabelSelector: params.LabelSelector,
				Type:          params.Type,
				Start:         params.Start,
				End:           params.End,
			},
		}))

		// select every profile offered by the server
		for {
			resp, err := bidi.Receive()
			require.NoError(t, err)

			// when empty, finished reading profiles
			if resp.SelectedProfiles == nil {
				break
			}

			selectProfiles := make([]bool, len(resp.SelectedProfiles.Profiles))
			for pos := range resp.SelectedProfiles.Profiles {
				selectProfiles[pos] = true
			}

			require.NoError(t, bidi.Send(&ingestv1.MergeProfilesPprofRequest{
				Profiles: selectProfiles,
			}))
		}

		// still receiving a result
		result, err := bidi.Receive()
		require.NoError(t, err)

		var (
			values = make(map[string]int64)
			sb     strings.Builder
		)

		p, err := profile.ParseUncompressed(result.Result)
		require.NoError(t, err)

		// render each sample's stacktrace as "leaf/.../root" and record its value
		for _, x := range p.Sample {
			sb.Reset()
			for _, loc := range x.Location {
				for _, line := range loc.Line {
					sb.WriteString(line.Function.Name)
					sb.WriteString("/")
				}
			}
			stacktrace := sb.String()[:sb.Len()-1] // drop the trailing "/"
			values[stacktrace] = x.Value[0]
		}
		assert.Equal(
			t,
			map[string]int64{"func1/func2": 90, "func1": 180},
			values,
		)
	})
}
   598  
   599  func TestRemoveFailedSegment(t *testing.T) {
   600  	store := newProfileStore(testContext(t))
   601  	dir := t.TempDir()
   602  	require.NoError(t, store.Init(dir, defaultParquetConfig, contextHeadMetrics(context.Background())))
   603  	// fake a failed segment
   604  	_, err := os.Create(dir + "/profiles.0.parquet")
   605  	require.NoError(t, store.ingest(context.Background(), []schemav1.InMemoryProfile{{}}, phlaremodel.LabelsFromStrings(), "memory"))
   606  	require.NoError(t, err)
   607  	err = store.cutRowGroup(1)
   608  	require.NoError(t, err)
   609  }