github.com/grafana/pyroscope@v1.18.0/pkg/segmentwriter/segment_test.go (about)

     1  package segmentwriter
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"flag"
     8  	"fmt"
     9  	"io"
    10  	"math/rand"
    11  	"path/filepath"
    12  	"slices"
    13  	"strings"
    14  	"sync"
    15  	"sync/atomic"
    16  	"testing"
    17  	"time"
    18  
    19  	gprofile "github.com/google/pprof/profile"
    20  	"github.com/grafana/dskit/flagext"
    21  	prommodel "github.com/prometheus/common/model"
    22  	"github.com/stretchr/testify/assert"
    23  	"github.com/stretchr/testify/mock"
    24  	"github.com/stretchr/testify/require"
    25  	"github.com/thanos-io/objstore"
    26  	"golang.org/x/time/rate"
    27  
    28  	profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1"
    29  	ingesterv1 "github.com/grafana/pyroscope/api/gen/proto/go/ingester/v1"
    30  	"github.com/grafana/pyroscope/api/gen/proto/go/ingester/v1/ingesterv1connect"
    31  	metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1"
    32  	typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
    33  	"github.com/grafana/pyroscope/pkg/block"
    34  	"github.com/grafana/pyroscope/pkg/block/metadata"
    35  	"github.com/grafana/pyroscope/pkg/metastore"
    36  	"github.com/grafana/pyroscope/pkg/metastore/index/dlq"
    37  	metastoretest "github.com/grafana/pyroscope/pkg/metastore/test"
    38  	"github.com/grafana/pyroscope/pkg/model"
    39  	"github.com/grafana/pyroscope/pkg/objstore/providers/filesystem"
    40  	"github.com/grafana/pyroscope/pkg/objstore/providers/memory"
    41  	"github.com/grafana/pyroscope/pkg/og/convert/pprof/bench"
    42  	"github.com/grafana/pyroscope/pkg/phlaredb"
    43  	testutil3 "github.com/grafana/pyroscope/pkg/phlaredb/block/testutil"
    44  	pprofth "github.com/grafana/pyroscope/pkg/pprof/testhelper"
    45  	"github.com/grafana/pyroscope/pkg/segmentwriter/memdb"
    46  	memdbtest "github.com/grafana/pyroscope/pkg/segmentwriter/memdb/testutil"
    47  	"github.com/grafana/pyroscope/pkg/test"
    48  	"github.com/grafana/pyroscope/pkg/test/mocks/mockdlq"
    49  	"github.com/grafana/pyroscope/pkg/test/mocks/mockmetastorev1"
    50  	"github.com/grafana/pyroscope/pkg/test/mocks/mockobjstore"
    51  	"github.com/grafana/pyroscope/pkg/validation"
    52  )
    53  
    54  func TestSegmentIngest(t *testing.T) {
    55  	td := [][]inputChunk{
    56  		staticTestData(),
    57  		testDataGenerator{
    58  			seed:     239,
    59  			chunks:   3,
    60  			profiles: 256,
    61  			shards:   4,
    62  			tenants:  3,
    63  			services: 5,
    64  		}.generate(),
    65  		//testDataGenerator{
    66  		//	seed:     time.Now().UnixNano(),
    67  		//	chunks:   3,
    68  		//	profiles: 4096,
    69  		//	shards:   8,
    70  		//	tenants:  12,
    71  		//	services: 16,
    72  		//}.generate(),
    73  	}
    74  	for i, chunks := range td {
    75  		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
    76  			t.Run("ingestWithMetastoreAvailable", func(t *testing.T) {
    77  				ingestWithMetastoreAvailable(t, chunks)
    78  			})
    79  			t.Run("ingestWithDLQ", func(t *testing.T) {
    80  				ingestWithDLQ(t, chunks)
    81  			})
    82  		})
    83  	}
    84  }
    85  
// ingestWithMetastoreAvailable ingests every chunk while the metastore mock
// accepts AddBlock calls, then verifies that the profiles can be queried back
// from the blocks described by the collected metas.
func ingestWithMetastoreAvailable(t *testing.T, chunks []inputChunk) {
	sw := newTestSegmentWriter(t, defaultTestConfig())
	defer sw.stop()
	// Buffered so the AddBlock mock never blocks the writer's flush path.
	blocks := make(chan *metastorev1.BlockMeta, 128)

	sw.client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			blocks <- args.Get(1).(*metastorev1.AddBlockRequest).Block
		}).Return(new(metastorev1.AddBlockResponse), nil)
	for _, chunk := range chunks {
		chunkBlocks := make([]*metastorev1.BlockMeta, 0, len(chunk))
		waiterSet := sw.ingestChunk(t, chunk, false)
		// One block meta is produced per flushed segment; the number of
		// distinct awaiters matches the number of flushed segments.
		for range waiterSet {
			meta := <-blocks
			chunkBlocks = append(chunkBlocks, meta)
		}
		inputs := groupInputs(t, chunk)
		clients := sw.createBlocksFromMetas(chunkBlocks)
		sw.queryInputs(clients, inputs)
	}
}
   107  
// ingestWithDLQ ingests chunks while AddBlock always fails, which forces
// block metadata into the bucket's DLQ area. After each chunk it re-reads all
// DLQ metas and verifies that everything ingested so far is still queryable.
func ingestWithDLQ(t *testing.T, chunks []inputChunk) {
	sw := newTestSegmentWriter(t, defaultTestConfig())
	defer sw.stop()
	sw.client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Return(nil, fmt.Errorf("metastore unavailable"))
	ingestedChunks := make([]inputChunk, 0, len(chunks))
	for chunkIndex, chunk := range chunks {
		t.Logf("ingesting chunk %d", chunkIndex)
		_ = sw.ingestChunk(t, chunk, false)
		ingestedChunks = append(ingestedChunks, chunk)
		// The DLQ accumulates metas across chunks, so query against the
		// union of all chunks ingested so far.
		allBlocks := sw.getMetadataDLQ()
		clients := sw.createBlocksFromMetas(allBlocks)
		inputs := groupInputs(t, ingestedChunks...)
		t.Logf("querying chunk %d", chunkIndex)
		sw.queryInputs(clients, inputs)
	}
}
   125  
   126  func TestIngestWait(t *testing.T) {
   127  	sw := newTestSegmentWriter(t, defaultTestConfig())
   128  
   129  	defer sw.stop()
   130  	sw.client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
   131  		time.Sleep(1 * time.Second)
   132  	}).Return(new(metastorev1.AddBlockResponse), nil)
   133  
   134  	t1 := time.Now()
   135  	awaiter := sw.ingest(0, func(head segmentIngest) {
   136  		p := cpuProfile(42, 480, "svc1", "foo", "bar")
   137  		head.ingest("t1", p.Profile, p.UUID, p.Labels, p.Annotations)
   138  	})
   139  	err := awaiter.waitFlushed(context.Background())
   140  	require.NoError(t, err)
   141  	since := time.Since(t1)
   142  	require.True(t, since > 1*time.Second)
   143  }
   144  
// TestBusyIngestLoop hammers the writer from several concurrent workers while
// segments keep rotating, then verifies that every ingested profile can be
// queried back from the flushed blocks.
func TestBusyIngestLoop(t *testing.T) {

	sw := newTestSegmentWriter(t, defaultTestConfig())
	defer sw.stop()

	// writeCtx stops the producer goroutines once enough segments flushed;
	// readCtx stops the meta-collecting goroutine after producers finish.
	writeCtx, writeCancel := context.WithCancel(context.Background())
	readCtx, readCancel := context.WithCancel(context.Background())
	metaChan := make(chan *metastorev1.BlockMeta)

	var cnt atomic.Int32
	sw.client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			metaChan <- args.Get(1).(*metastorev1.AddBlockRequest).Block
			// Stop the writers after the third flushed segment.
			if cnt.Add(1) == 3 {
				writeCancel()
			}
		}).Return(new(metastorev1.AddBlockResponse), nil)
	metas := make([]*metastorev1.BlockMeta, 0)
	readG := sync.WaitGroup{}
	readG.Add(1)
	go func() {
		defer readG.Done()
		for {
			select {
			case <-readCtx.Done():
				return
			case meta := <-metaChan:
				metas = append(metas, meta)
			}
		}
	}()
	writeG := sync.WaitGroup{}
	allProfiles := make([]*pprofth.ProfileBuilder, 0)
	m := new(sync.Mutex)
	nWorkers := 5
	for i := 0; i < nWorkers; i++ {
		workerno := i // per-iteration copy for the closure (pre-Go 1.22 semantics)
		writeG.Add(1)
		go func() {
			defer writeG.Done()
			awaiters := make([]segmentWaitFlushed, 0)
			profiles := make([]*pprofth.ProfileBuilder, 0)
			defer func() {
				require.NotEmpty(t, profiles)
				require.NotEmpty(t, awaiters)
				// Every ingest must eventually flush successfully.
				for _, awaiter := range awaiters {
					err := awaiter.waitFlushed(context.Background())
					require.NoError(t, err)
				}
				m.Lock()
				allProfiles = append(allProfiles, profiles...)
				m.Unlock()
			}()
			for {
				select {
				case <-writeCtx.Done():
					return
				default:
					// Spread timestamps per worker so every profile is unique.
					ts := workerno*1000000000 + len(profiles)
					awaiter := sw.ingest(1, func(head segmentIngest) {
						p := cpuProfile(42, ts, "svc1", "foo", "bar")
						// Clone so the original builder is kept intact for
						// later verification (ingest may rewrite the proto).
						head.ingest("t1", p.CloneVT(), p.UUID, p.Labels, p.Annotations)
						profiles = append(profiles, p)
					})
					awaiters = append(awaiters, awaiter)
				}
			}
		}()
	}
	writeG.Wait()

	readCancel()
	readG.Wait()
	assert.True(t, len(metas) >= 3)

	// Cross-check: everything ingested by every worker must be queryable.
	chunk := make(inputChunk, 0)
	for _, p := range allProfiles {
		chunk = append(chunk, input{shard: 1, tenant: "t1", profile: p})
	}
	inputs := groupInputs(t, chunk)
	clients := sw.createBlocksFromMetas(metas)
	sw.queryInputs(clients, inputs)
}
   228  
// TestDLQFail covers the case where both the metastore AddBlock call and the
// metadata DLQ upload fail: waitFlushed must surface the error, and repeated
// waits must consistently return the same error.
func TestDLQFail(t *testing.T) {
	l := test.NewTestingLogger(t)
	bucket := mockobjstore.NewMockBucket(t)
	// Segment data uploads succeed...
	bucket.On("Upload", mock.Anything, mock.MatchedBy(func(name string) bool {
		return isSegmentPath(name)
	}), mock.Anything).Return(nil)
	// ...but DLQ metadata uploads fail.
	bucket.On("Upload", mock.Anything, mock.MatchedBy(func(name string) bool {
		return isDLQPath(name)
	}), mock.Anything).Return(fmt.Errorf("mock upload DLQ error"))
	client := mockmetastorev1.NewMockIndexServiceClient(t)
	client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Return(nil, fmt.Errorf("mock add block error"))

	res := newSegmentWriter(
		l,
		newSegmentMetrics(nil),
		memdb.NewHeadMetricsWithPrefix(nil, ""),
		defaultTestConfig(),
		validation.MockDefaultOverrides(),
		bucket,
		client,
	)
	defer res.stop()
	ts := 420
	ing := func(head segmentIngest) {
		ts += 420
		p := cpuProfile(42, ts, "svc1", "foo", "bar")
		head.ingest("t1", p.Profile, p.UUID, p.Labels, p.Annotations)
	}

	// Both ingests use shard 0; the test expects them to share the same
	// flush error (see the equality assertions below).
	awaiter1 := res.ingest(0, ing)
	awaiter2 := res.ingest(0, ing)

	err1 := awaiter1.waitFlushed(context.Background())
	require.Error(t, err1)

	// Intentionally waits on awaiter1 a second time: a repeated wait must
	// return the same error, not a fresh one.
	err2 := awaiter1.waitFlushed(context.Background())
	require.Error(t, err2)

	err3 := awaiter2.waitFlushed(context.Background())
	require.Error(t, err3)

	require.Equal(t, err1, err2)
	require.Equal(t, err1, err3)
}
   274  
// TestDatasetMinMaxTime checks that the per-dataset and block-level
// MinTime/MaxTime in the produced block meta reflect the ingested profile
// timestamps (milliseconds).
func TestDatasetMinMaxTime(t *testing.T) {
	l := test.NewTestingLogger(t)
	bucket := memory.NewInMemBucket()
	metas := make(chan *metastorev1.BlockMeta)
	client := mockmetastorev1.NewMockIndexServiceClient(t)
	client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			meta := args.Get(1).(*metastorev1.AddBlockRequest).Block
			metas <- meta
		}).Return(new(metastorev1.AddBlockResponse), nil)
	res := newSegmentWriter(
		l,
		newSegmentMetrics(nil),
		memdb.NewHeadMetricsWithPrefix(nil, ""),
		defaultTestConfig(),
		validation.MockDefaultOverrides(),
		bucket,
		client,
	)
	// Two tenants, three (tenant, service) datasets with distinct time spans.
	data := []input{
		{shard: 1, tenant: "tb", profile: cpuProfile(42, 239, "svc1", "foo", "bar")},
		{shard: 1, tenant: "tb", profile: cpuProfile(13, 420, "svc1", "qwe", "foo", "bar")},
		{shard: 1, tenant: "tb", profile: cpuProfile(13, 420, "svc2", "qwe", "foo", "bar")},
		{shard: 1, tenant: "tb", profile: cpuProfile(13, 421, "svc2", "qwe", "foo", "bar")},
		{shard: 1, tenant: "ta", profile: cpuProfile(13, 10, "svc1", "vbn", "foo", "bar")},
		{shard: 1, tenant: "ta", profile: cpuProfile(13, 1337, "svc1", "vbn", "foo", "bar")},
	}
	_ = res.ingest(1, func(head segmentIngest) {
		for _, p := range data {
			head.ingest(p.tenant, p.profile.Profile, p.profile.UUID, p.profile.Labels, p.profile.Annotations)
		}
	})
	defer res.stop()

	block := <-metas

	// Expected [min, max] millis per dataset. The order matches the order
	// of block.Datasets; from the asserted values this appears to be
	// ta/svc1, tb/svc1, tb/svc2 (tenant, then service).
	expected := [][2]int{
		{10, 1337},
		{239, 420},
		{420, 421},
	}

	require.Equal(t, len(expected), len(block.Datasets))
	for i, ds := range block.Datasets {
		assert.Equalf(t, expected[i][0], int(ds.MinTime), "idx %d", i)
		assert.Equalf(t, expected[i][1], int(ds.MaxTime), "idx %d", i)
	}
	// Block-level bounds cover all datasets.
	assert.Equal(t, int64(10), block.MinTime)
	assert.Equal(t, int64(1337), block.MaxTime)
}
   325  
// TestQueryMultipleSeriesSingleTenant ingests several series for one tenant
// and checks that a regex service selector with a bounded time range merges
// exactly the profiles inside that range.
func TestQueryMultipleSeriesSingleTenant(t *testing.T) {
	metas := make(chan *metastorev1.BlockMeta, 1)

	sw := newTestSegmentWriter(t, defaultTestConfig())
	defer sw.stop()
	sw.client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			metas <- args.Get(1).(*metastorev1.AddBlockRequest).Block
		}).Return(new(metastorev1.AddBlockResponse), nil)

	data := inputChunk([]input{
		{shard: 1, tenant: "tb", profile: cpuProfile(42, 239, "svc1", "kek", "foo", "bar")},
		{shard: 1, tenant: "tb", profile: cpuProfile(13, 420, "svc1", "qwe1", "foo", "bar")},
		{shard: 1, tenant: "tb", profile: cpuProfile(17, 420, "svc2", "qwe3", "foo", "bar")},
		{shard: 1, tenant: "tb", profile: cpuProfile(13, 421, "svc2", "qwe", "foo", "bar")},
		{shard: 1, tenant: "ta", profile: cpuProfile(13, 10, "svc1", "vbn", "foo", "bar")},
		{shard: 1, tenant: "ta", profile: cpuProfile(13, 1337, "svc1", "vbn", "foo", "bar")},
	})
	sw.ingestChunk(t, data, false)
	block := <-metas

	clients := sw.createBlocksFromMetas([]*metastorev1.BlockMeta{block})
	defer func() {
		// Release per-tenant query clients.
		for _, tc := range clients {
			tc.f()
		}
	}()

	// Query tenant "tb" across both services within [239, 420] millis.
	client := clients["tb"]
	actualMerged := sw.query(client, &ingesterv1.SelectProfilesRequest{
		LabelSelector: "{service_name=~\"svc[12]\"}",
		Type:          mustParseProfileSelector(t, "process_cpu:cpu:nanoseconds:cpu:nanoseconds"),
		Start:         239,
		End:           420,
	})
	// Only the first three tb profiles fall inside the window; the one at
	// ts=421 and all of tenant "ta" are excluded.
	expectedMerged := mergeProfiles(t, []*profilev1.Profile{
		data[0].profile.Profile,
		data[1].profile.Profile,
		data[2].profile.Profile,
	})
	actualCollapsed := bench.StackCollapseProto(actualMerged, 0, 1)
	expectedCollapsed := bench.StackCollapseProto(expectedMerged, 0, 1)
	require.Equal(t, expectedCollapsed, actualCollapsed)
}
   370  
// TestDLQRecoveryMock verifies that the DLQ recovery loop picks up a meta
// written to the bucket DLQ and re-registers it via AddRecoveredBlock on a
// mocked metastore.
func TestDLQRecoveryMock(t *testing.T) {
	chunk := inputChunk([]input{
		{shard: 1, tenant: "tb", profile: cpuProfile(42, 239, "svc1", "kek", "foo", "bar")},
	})

	sw := newTestSegmentWriter(t, defaultTestConfig())
	// AddBlock always fails so the meta lands in the DLQ.
	sw.client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Return(nil, fmt.Errorf("mock metastore unavailable"))

	_ = sw.ingestChunk(t, chunk, false)
	allBlocks := sw.getMetadataDLQ()
	assert.Len(t, allBlocks, 1)

	recoveredMetas := make(chan *metastorev1.BlockMeta, 1)
	srv := mockdlq.NewMockMetastore(t)
	srv.On("AddRecoveredBlock", mock.Anything, mock.Anything).
		Once().
		Run(func(args mock.Arguments) {
			meta := args.Get(1).(*metastorev1.AddBlockRequest).Block
			recoveredMetas <- meta
		}).
		Return(&metastorev1.AddBlockResponse{}, nil)
	recovery := dlq.NewRecovery(test.NewTestingLogger(t), dlq.Config{
		CheckInterval: 100 * time.Millisecond,
	}, srv, sw.bucket, nil)
	recovery.Start()
	defer recovery.Stop()

	// The recovered meta must be the one that went into the DLQ.
	meta := <-recoveredMetas
	assert.Equal(t, allBlocks[0].Id, meta.Id)

	// Data itself stays queryable regardless of the recovery path.
	clients := sw.createBlocksFromMetas(allBlocks)
	inputs := groupInputs(t, chunk)
	sw.queryInputs(clients, inputs)
}
   406  
// TestDLQRecovery is the end-to-end variant of TestDLQRecoveryMock: a real
// metastore cluster recovers the DLQ'd meta, after which the block becomes
// discoverable through metadata queries.
func TestDLQRecovery(t *testing.T) {
	const tenant = "tb"
	var ts = time.Now().UnixMilli()
	chunk := inputChunk([]input{
		{shard: 1, tenant: tenant, profile: cpuProfile(42, int(ts), "svc1", "kek", "foo", "bar")},
	})

	sw := newTestSegmentWriter(t, defaultTestConfig())
	// AddBlock always fails so the meta lands in the DLQ.
	sw.client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Return(nil, fmt.Errorf("mock metastore unavailable"))

	_ = sw.ingestChunk(t, chunk, false)

	// Three-node metastore cluster sharing the writer's bucket, with a fast
	// DLQ recovery check interval.
	cfg := new(metastore.Config)
	flagext.DefaultValues(cfg)
	cfg.Index.Recovery.CheckInterval = 100 * time.Millisecond
	m := metastoretest.NewMetastoreSet(t, cfg, 3, sw.bucket)
	defer m.Close()

	// queryBlock returns the recovered block meta, or nil until recovery
	// has completed.
	queryBlock := func() *metastorev1.BlockMeta {
		res, err := m.Client.QueryMetadata(context.Background(), &metastorev1.QueryMetadataRequest{
			TenantId:  []string{tenant},
			StartTime: ts - 1,
			EndTime:   ts + 1,
			Query:     "{service_name=~\"svc1\"}",
		})
		if err != nil {
			return nil
		}
		if len(res.Blocks) == 1 {
			return res.Blocks[0]
		}
		return nil
	}
	require.Eventually(t, func() bool {
		return queryBlock() != nil
	}, 10*time.Second, 100*time.Millisecond)

	block := queryBlock()
	require.NotNil(t, block)

	clients := sw.createBlocksFromMetas([]*metastorev1.BlockMeta{block})
	inputs := groupInputs(t, chunk)
	sw.queryInputs(clients, inputs)
}
   452  
// TestUnsymbolizedLabelIsSet builds a profile whose mapping has no function
// info and whose location has no lines — i.e. an unsymbolized profile — and
// checks that the resulting block meta carries the "unsymbolized" label.
func TestUnsymbolizedLabelIsSet(t *testing.T) {
	sw := newTestSegmentWriter(t, defaultTestConfig())
	defer sw.stop()
	blocks := make(chan *metastorev1.BlockMeta, 1)

	sw.client.On("AddBlock", mock.Anything, mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			blocks <- args.Get(1).(*metastorev1.AddBlockRequest).Block
		}).Return(new(metastorev1.AddBlockResponse), nil)

	p := pprofth.NewProfileBuilder(time.Now().UnixNano()).
		CPUProfile().
		WithLabels(model.LabelNameServiceName, "svc1")

	// Mapping without function info marks the profile as unsymbolized.
	p.Mapping = []*profilev1.Mapping{
		{Id: 1, HasFunctions: false},
	}

	// Location referencing that mapping, with no line records.
	loc := &profilev1.Location{
		Id:        1,
		MappingId: 1,
		Line:      nil,
	}
	p.Location = append(p.Location, loc)

	// Add a string-valued sample label ("foo"="bar") via the string table.
	keyIdx := int64(len(p.StringTable))
	p.StringTable = append(p.StringTable, "foo")
	valIdx := int64(len(p.StringTable))
	p.StringTable = append(p.StringTable, "bar")

	sample1 := &profilev1.Sample{
		LocationId: []uint64{1},
		Value:      []int64{1},
		Label: []*profilev1.Label{
			{Key: keyIdx, Str: valIdx},
		},
	}
	p.Sample = append(p.Sample, sample1)

	// Second sample without labels, sharing the same location.
	sample2 := &profilev1.Sample{
		LocationId: []uint64{1},
		Value:      []int64{2},
		Label:      nil,
	}
	p.Sample = append(p.Sample, sample2)

	chunk := inputChunk{
		{shard: 1, tenant: "t1", profile: p},
	}
	_ = sw.ingestChunk(t, chunk, false)
	block := <-blocks

	require.True(t, hasUnsymbolizedLabel(t, block))
}
   507  
// sw wraps a segmentsWriter together with the in-memory bucket and mocked
// metastore client backing it, for convenient assertions in tests.
type sw struct {
	*segmentsWriter
	bucket  *memory.InMemBucket                     // object storage the writer uploads to
	client  *mockmetastorev1.MockIndexServiceClient // AddBlock mock
	t       *testing.T
	queryNo int // incremented once per queryInputs call
}
   515  
   516  func newTestSegmentWriter(t *testing.T, cfg Config) sw {
   517  	l := test.NewTestingLogger(t)
   518  	bucket := memory.NewInMemBucket()
   519  	client := mockmetastorev1.NewMockIndexServiceClient(t)
   520  	res := newSegmentWriter(
   521  		l,
   522  		newSegmentMetrics(nil),
   523  		memdb.NewHeadMetricsWithPrefix(nil, ""),
   524  		cfg,
   525  		validation.MockDefaultOverrides(),
   526  		bucket,
   527  		client,
   528  	)
   529  	return sw{
   530  		t:              t,
   531  		segmentsWriter: res,
   532  		bucket:         bucket,
   533  		client:         client,
   534  	}
   535  }
   536  
// defaultTestConfig returns a writer config with a short segment duration so
// tests flush quickly, and with the metadata DLQ enabled.
func defaultTestConfig() Config {
	return Config{
		SegmentDuration:       100 * time.Millisecond,
		UploadTimeout:         time.Second,
		MetadataUpdateTimeout: time.Second,
		MetadataDLQEnabled:    true,
	}
}
   545  
// createBlocksFromMetas downloads each segment object referenced by the given
// metas, slices it into its per-dataset sections using the dataset table of
// contents, materializes local phlaredb blocks from them, and returns a query
// client per tenant.
func (sw *sw) createBlocksFromMetas(blocks []*metastorev1.BlockMeta) tenantClients {
	dir := sw.t.TempDir()
	for _, meta := range blocks {
		blobReader, err := sw.bucket.Get(context.Background(), block.ObjectPath(meta))
		require.NoError(sw.t, err)
		blob, err := io.ReadAll(blobReader)
		require.NoError(sw.t, err)

		for _, ds := range meta.Datasets {
			tenant := meta.StringTable[ds.Tenant]
			// TableOfContents holds byte offsets of the three sections of
			// a dataset within the segment blob: profiles, tsdb, symbols.
			profiles := blob[ds.TableOfContents[0]:ds.TableOfContents[1]]
			tsdb := blob[ds.TableOfContents[1]:ds.TableOfContents[2]]
			symbols := blob[ds.TableOfContents[2] : ds.TableOfContents[0]+ds.Size]
			testutil3.CreateBlockFromMemory(sw.t,
				filepath.Join(dir, tenant),
				prommodel.TimeFromUnixNano(ds.MinTime*1e6), //todo  do not use 1e6, add comments to minTime clarifying the unit
				prommodel.TimeFromUnixNano(ds.MaxTime*1e6),
				profiles,
				tsdb,
				symbols,
			)
		}
	}

	// Build one query client per tenant over its local block directory.
	res := make(tenantClients)
	for _, meta := range blocks {
		for _, ds := range meta.Datasets {
			tenant := meta.StringTable[ds.Tenant]
			if _, ok := res[tenant]; !ok {
				// todo consider not using BlockQuerier for tests
				blockBucket, err := filesystem.NewBucket(filepath.Join(dir, tenant))
				require.NoError(sw.t, err)

				blockQuerier := phlaredb.NewBlockQuerier(context.Background(), blockBucket)
				err = blockQuerier.Sync(context.Background())
				require.NoError(sw.t, err)

				queriers := blockQuerier.Queriers()
				err = queriers.Open(context.Background())
				require.NoError(sw.t, err)

				// f releases the client's resources when verification is done.
				q, f := memdbtest.IngesterClientForTest(sw.t, queriers)

				res[tenant] = tenantClient{
					tenant: tenant,
					client: q,
					f:      f,
				}
			}
		}
	}

	return res
}
   600  
// queryInputs verifies that, for every tenant/service/metric grouping, a
// merge query against the reconstructed blocks yields the same collapsed
// stacks as merging the original input profiles. Clients are closed on exit.
func (sw *sw) queryInputs(clients tenantClients, inputs groupedInputs) {
	sw.queryNo++
	t := sw.t
	defer func() {
		for _, tc := range clients {
			tc.f()
		}
	}()

	for tenant, tenantInputs := range inputs {
		tc, ok := clients[tenant]
		require.True(sw.t, ok)
		for svc, metricNameInputs := range tenantInputs {
			for metricName, profiles := range metricNameInputs {
				start, end := getStartEndTime(profiles)
				ps := make([]*profilev1.Profile, 0, len(profiles))
				for _, p := range profiles {
					ps = append(ps, p.Profile)
				}
				// Expected result: client-side merge of the raw inputs.
				expectedMerged := mergeProfiles(sw.t, ps)

				// Each sample type of the metric is queried separately;
				// sti selects the matching value column in the expected merge.
				sts := sampleTypesFromMetricName(sw.t, metricName)
				for sti, st := range sts {
					q := &ingesterv1.SelectProfilesRequest{
						LabelSelector: fmt.Sprintf("{%s=\"%s\"}", model.LabelNameServiceName, svc),
						Type:          st,
						Start:         start,
						End:           end,
					}
					actualMerged := sw.query(tc, q)

					actualCollapsed := bench.StackCollapseProto(actualMerged, 0, 1)
					expectedCollapsed := bench.StackCollapseProto(expectedMerged, sti, 1)
					require.Equal(t, expectedCollapsed, actualCollapsed)
				}

			}
		}
	}
}
   641  
// query drives the bidirectional MergeProfilesPprof protocol end to end:
// send the selection request, receive the selected profiles, select all of
// them, receive the end-of-selection marker, then receive and decode the
// merged pprof result.
func (sw *sw) query(tc tenantClient, q *ingesterv1.SelectProfilesRequest) *profilev1.Profile {
	t := sw.t
	bidi := tc.client.MergeProfilesPprof(context.Background())
	err := bidi.Send(&ingesterv1.MergeProfilesPprofRequest{
		Request: q,
	})
	require.NoError(sw.t, err)

	// First response carries the candidate profiles, not a result yet.
	resp, err := bidi.Receive()
	require.NoError(t, err)
	require.Nil(t, resp.Result)
	require.NotNilf(t, resp.SelectedProfiles, "res %+v", resp)
	require.NotEmpty(t, resp.SelectedProfiles.Fingerprints)
	require.NotEmpty(t, resp.SelectedProfiles.Profiles)

	nProfiles := len(resp.SelectedProfiles.Profiles)

	// Mark every candidate profile as selected for the merge.
	bools := make([]bool, nProfiles)
	for i := 0; i < nProfiles; i++ {
		bools[i] = true
	}
	require.NoError(t, bidi.Send(&ingesterv1.MergeProfilesPprofRequest{
		Profiles: bools,
	}))

	// expect empty resp to signal it is finished
	resp, err = bidi.Receive()
	require.NoError(t, err)
	require.Nil(t, resp.Result)
	require.Nil(t, resp.SelectedProfiles)

	// Final response carries the merged pprof bytes.
	resp, err = bidi.Receive()
	require.NoError(t, err)
	require.NotNil(t, resp.Result)

	actualMerged := &profilev1.Profile{}
	err = actualMerged.UnmarshalVT(resp.Result)
	require.NoError(t, err)
	return actualMerged
}
   682  
   683  // millis
   684  func getStartEndTime(profiles []*pprofth.ProfileBuilder) (int64, int64) {
   685  	start := profiles[0].TimeNanos
   686  	end := profiles[0].TimeNanos
   687  	for _, p := range profiles {
   688  		if p.TimeNanos < start {
   689  			start = p.TimeNanos
   690  		}
   691  		if p.TimeNanos > end {
   692  			end = p.TimeNanos
   693  		}
   694  	}
   695  	start = start / 1e6
   696  	end = end / 1e6
   697  	end += 1
   698  	return start, end
   699  }
   700  
   701  func (sw *sw) getMetadataDLQ() []*metastorev1.BlockMeta {
   702  	objects := sw.bucket.Objects()
   703  	dlqFiles := []string{}
   704  	for s := range objects {
   705  		if isDLQPath(s) {
   706  			dlqFiles = append(dlqFiles, s)
   707  		}
   708  	}
   709  	slices.Sort(dlqFiles)
   710  	var metas []*metastorev1.BlockMeta
   711  	for _, s := range dlqFiles {
   712  		var meta = new(metastorev1.BlockMeta)
   713  		err := meta.UnmarshalVT(objects[s])
   714  		require.NoError(sw.t, err)
   715  		metas = append(metas, meta)
   716  	}
   717  	return metas
   718  }
   719  
// ingestChunk ingests every input of the chunk concurrently and waits for
// each flush to complete (or fail, when expectAwaitError is set). It returns
// the set of distinct awaiters — one per segment the inputs were flushed in.
// nolint: unparam
func (sw *sw) ingestChunk(t *testing.T, chunk inputChunk, expectAwaitError bool) map[segmentWaitFlushed]struct{} {
	wg := sync.WaitGroup{}
	waiterSet := make(map[segmentWaitFlushed]struct{})
	mutex := new(sync.Mutex)
	for i := range chunk {
		it := chunk[i] // per-iteration copy for the goroutine closure
		wg.Add(1)

		go func() {
			defer wg.Done()
			awaiter := sw.ingest(shardKey(it.shard), func(head segmentIngest) {
				p := it.profile.CloneVT() // important to not rewrite original profile
				head.ingest(it.tenant, p, it.profile.UUID, it.profile.Labels, it.profile.Annotations)
			})
			err := awaiter.waitFlushed(context.Background())
			if expectAwaitError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			// waiterSet deduplicates awaiters shared by inputs that landed
			// in the same segment.
			mutex.Lock()
			waiterSet[awaiter] = struct{}{}
			mutex.Unlock()
		}()
	}
	wg.Wait()
	return waiterSet
}
   749  
// input is a single profile to ingest into a given shard for a tenant.
type input struct {
	shard   uint32
	tenant  string
	profile *pprofth.ProfileBuilder
}

// groupedInputs indexes ingested profiles as
// tenant -> service name -> metric name -> profiles.
type groupedInputs map[string]map[string]map[string][]*pprofth.ProfileBuilder

// inputChunk is a batch of inputs ingested together.
type inputChunk []input

// tenantClient is a per-tenant ingester query client; f releases its resources.
type tenantClient struct {
	tenant string
	client ingesterv1connect.IngesterServiceClient
	f      func()
}

// tenantClients maps tenant -> query client over the reconstructed blocks.
type tenantClients map[string]tenantClient
   769  
   770  func groupInputs(t *testing.T, chunks ...inputChunk) groupedInputs {
   771  	shardToTenantToServiceToSampleType := make(groupedInputs)
   772  	for _, chunk := range chunks {
   773  
   774  		for _, in := range chunk {
   775  			if _, ok := shardToTenantToServiceToSampleType[in.tenant]; !ok {
   776  				shardToTenantToServiceToSampleType[in.tenant] = make(map[string]map[string][]*pprofth.ProfileBuilder)
   777  			}
   778  			svc := ""
   779  			for _, lbl := range in.profile.Labels {
   780  				if lbl.Name == model.LabelNameServiceName {
   781  					svc = lbl.Value
   782  				}
   783  			}
   784  			require.NotEmptyf(t, svc, "service name not found in labels: %v", in.profile.Labels)
   785  			if _, ok := shardToTenantToServiceToSampleType[in.tenant][svc]; !ok {
   786  				shardToTenantToServiceToSampleType[in.tenant][svc] = make(map[string][]*pprofth.ProfileBuilder)
   787  			}
   788  			metricname := ""
   789  			for _, lbl := range in.profile.Labels {
   790  				if lbl.Name == prommodel.MetricNameLabel {
   791  					metricname = lbl.Value
   792  				}
   793  			}
   794  			require.NotEmptyf(t, metricname, "metric name not found in labels: %v", in.profile.Labels)
   795  			shardToTenantToServiceToSampleType[in.tenant][svc][metricname] = append(shardToTenantToServiceToSampleType[in.tenant][svc][metricname], in.profile)
   796  		}
   797  	}
   798  
   799  	return shardToTenantToServiceToSampleType
   800  
   801  }
   802  
// cpuProfile builds a CPU profile with a single stacktrace observed the given
// number of times, timestamped at tsMillis (converted to nanos) and labeled
// with the service name svc.
func cpuProfile(samples int, tsMillis int, svc string, stack ...string) *pprofth.ProfileBuilder {
	return pprofth.NewProfileBuilder(int64(tsMillis*1e6)).
		CPUProfile().
		WithLabels(model.LabelNameServiceName, svc).
		WithAnnotations("test annotation").
		ForStacktraceString(stack...).
		AddSamples([]int64{int64(samples)}...)
}
   811  
// memProfile builds a memory profile with one stacktrace, filling the four
// memory sample-type columns (objects/space for alloc and in-use) from the
// single samples count.
func memProfile(samples int, tsMillis int, svc string, stack ...string) *pprofth.ProfileBuilder {
	v := int64(samples)
	return pprofth.NewProfileBuilder(int64(tsMillis*1e6)).
		MemoryProfile().
		WithLabels(model.LabelNameServiceName, svc).
		ForStacktraceString(stack...).
		AddSamples([]int64{v, v * 1024, v, v * 1024}...)
}
   820  
   821  func sampleTypesFromMetricName(t *testing.T, name string) []*typesv1.ProfileType {
   822  	if strings.Contains(name, "process_cpu") {
   823  		return []*typesv1.ProfileType{mustParseProfileSelector(t, "process_cpu:cpu:nanoseconds:cpu:nanoseconds")}
   824  	}
   825  	if strings.Contains(name, "memory") {
   826  		return []*typesv1.ProfileType{
   827  			mustParseProfileSelector(t, "memory:alloc_objects:count:space:bytes"),
   828  			mustParseProfileSelector(t, "memory:alloc_space:bytes:space:bytes"),
   829  			mustParseProfileSelector(t, "memory:inuse_objects:count:space:bytes"),
   830  			mustParseProfileSelector(t, "memory:inuse_space:bytes:space:bytes"),
   831  		}
   832  	}
   833  	require.Failf(t, "unknown metric name: %s", name)
   834  	return nil
   835  }
   836  
   837  func mustParseProfileSelector(t testing.TB, selector string) *typesv1.ProfileType {
   838  	ps, err := model.ParseProfileTypeSelector(selector)
   839  	require.NoError(t, err)
   840  	return ps
   841  }
   842  
   843  func mergeProfiles(t *testing.T, profiles []*profilev1.Profile) *profilev1.Profile {
   844  	gps := make([]*gprofile.Profile, 0, len(profiles))
   845  	for _, profile := range profiles {
   846  		gp := gprofileFromProtoProfile(t, profile)
   847  		gps = append(gps, gp)
   848  		gp.Compact()
   849  	}
   850  	merge, err := gprofile.Merge(gps)
   851  	require.NoError(t, err)
   852  
   853  	r := bytes.NewBuffer(nil)
   854  	err = merge.WriteUncompressed(r)
   855  	require.NoError(t, err)
   856  
   857  	msg := &profilev1.Profile{}
   858  	err = msg.UnmarshalVT(r.Bytes())
   859  	require.NoError(t, err)
   860  	return msg
   861  }
   862  
   863  func gprofileFromProtoProfile(t *testing.T, profile *profilev1.Profile) *gprofile.Profile {
   864  	data, err := profile.MarshalVT()
   865  	require.NoError(t, err)
   866  	p, err := gprofile.ParseData(data)
   867  	require.NoError(t, err)
   868  	return p
   869  }
   870  
// staticTestData returns two fixed input chunks that exercise multiple
// shards, tenants, and services, mixing CPU and memory profiles at
// distinct timestamps.
func staticTestData() []inputChunk {
	return []inputChunk{
		{
			// TODO: check why it takes 10ms for each head.
			{shard: 1, tenant: "t1", profile: cpuProfile(42, 480, "svc1", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: cpuProfile(13, 233, "svc1", "qwe", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: cpuProfile(13, 472, "svc1", "qwe", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: cpuProfile(13, 961, "svc1", "qwe", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: cpuProfile(13, 56, "svc1", "qwe", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: cpuProfile(13, 549, "svc1", "qwe", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: memProfile(13, 146, "svc1", "qwe", "qwe", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: memProfile(43, 866, "svc1", "asd", "zxc")},
			{shard: 1, tenant: "t1", profile: cpuProfile(07, 213, "svc2", "s3", "s2", "s1")},
			{shard: 1, tenant: "t2", profile: cpuProfile(47, 540, "svc2", "s3", "s2", "s1")},
			{shard: 1, tenant: "t2", profile: cpuProfile(77, 499, "svc3", "s3", "s2", "s1")},
			{shard: 2, tenant: "t2", profile: cpuProfile(29, 859, "svc3", "s3", "s2", "s1")},
			{shard: 2, tenant: "t2", profile: memProfile(11, 115, "svc3", "s3", "s2", "s1")},
			{shard: 4, tenant: "t2", profile: memProfile(11, 304, "svc3", "s3", "s2", "s1")},
		},
		{
			{shard: 1, tenant: "t1", profile: cpuProfile(05, 914, "svc1", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: cpuProfile(07, 290, "svc1", "qwe", "foo", "bar")},
			{shard: 1, tenant: "t1", profile: cpuProfile(24, 748, "svc2", "s3", "s2", "s1")},
			{shard: 2, tenant: "t3", profile: memProfile(23, 639, "svc3", "s3", "s2", "s1")},
			{shard: 3, tenant: "t3", profile: memProfile(23, 912, "svc3", "s3", "s2", "s1")},
			{shard: 3, tenant: "t3", profile: memProfile(33, 799, "svc3", "s2", "s1")},
		},
	}
}
   900  
   901  type (
   902  	testDataGenerator struct {
   903  		seed     int64
   904  		chunks   int
   905  		profiles int
   906  		shards   int
   907  		tenants  int
   908  		services int
   909  	}
   910  )
   911  
   912  func (g testDataGenerator) generate() []inputChunk {
   913  	r := rand.New(rand.NewSource(g.seed))
   914  	tg := timestampGenerator{
   915  		m: make(map[int64]struct{}),
   916  		r: rand.New(rand.NewSource(r.Int63())),
   917  	}
   918  	chunks := make([]inputChunk, g.chunks)
   919  
   920  	services := make([]string, 0, g.services)
   921  	for i := 0; i < g.services; i++ {
   922  		services = append(services, fmt.Sprintf("svc%d", i))
   923  	}
   924  	tenatns := make([]string, 0, g.tenants)
   925  	for i := 0; i < g.tenants; i++ {
   926  		tenatns = append(tenatns, fmt.Sprintf("t%d", i))
   927  	}
   928  	const nFrames = 16384
   929  	frames := make([]string, 0, nFrames)
   930  	for i := 0; i < nFrames; i++ {
   931  		frames = append(frames, fmt.Sprintf("frame%d", i))
   932  	}
   933  
   934  	for i := range chunks {
   935  		chunk := make(inputChunk, 0, g.profiles)
   936  		for j := 0; j < g.profiles; j++ {
   937  			shard := r.Intn(g.shards)
   938  			tenant := tenatns[r.Intn(g.tenants)]
   939  			svc := services[r.Intn(g.services)]
   940  			stack := make([]string, 0, 3)
   941  			for i := 0; i < 3; i++ {
   942  				stack = append(stack, frames[r.Intn(nFrames)])
   943  			}
   944  			typ := r.Intn(2)
   945  			var p *pprofth.ProfileBuilder
   946  			nSamples := r.Intn(100)
   947  			ts := tg.next()
   948  			if typ == 0 {
   949  				p = cpuProfile(nSamples+1, ts, svc, stack...)
   950  			} else {
   951  				p = memProfile(nSamples+1, ts, svc, stack...)
   952  			}
   953  			chunk = append(chunk, input{shard: uint32(shard), tenant: tenant, profile: p})
   954  		}
   955  		chunks[i] = chunk
   956  	}
   957  	return chunks
   958  }
   959  
   960  type timestampGenerator struct {
   961  	m map[int64]struct{}
   962  	r *rand.Rand
   963  }
   964  
   965  func (g *timestampGenerator) next() int {
   966  	for {
   967  		ts := g.r.Int63n(100000000)
   968  		if _, ok := g.m[ts]; !ok {
   969  			g.m[ts] = struct{}{}
   970  			return int(ts)
   971  		}
   972  	}
   973  }
   974  
   975  func isDLQPath(p string) bool {
   976  	fs := strings.Split(p, "/")
   977  	return len(fs) == 5 &&
   978  		fs[0] == block.DirNameDLQ &&
   979  		fs[2] == block.DirNameAnonTenant &&
   980  		fs[4] == block.FileNameMetadataObject
   981  }
   982  
   983  func isSegmentPath(p string) bool {
   984  	fs := strings.Split(p, "/")
   985  	return len(fs) == 5 &&
   986  		fs[0] == block.DirNameSegment &&
   987  		fs[2] == block.DirNameAnonTenant &&
   988  		fs[4] == block.FileNameDataObject
   989  }
   990  
   991  func hasUnsymbolizedLabel(t *testing.T, block *metastorev1.BlockMeta) bool {
   992  	t.Helper()
   993  	for _, ds := range block.Datasets {
   994  		i := 0
   995  		for i < len(ds.Labels) {
   996  			n := int(ds.Labels[i])
   997  			i++
   998  			for j := 0; j < n; j++ {
   999  				keyIdx := ds.Labels[i]
  1000  				valIdx := ds.Labels[i+1]
  1001  				i += 2
  1002  				if block.StringTable[keyIdx] == metadata.LabelNameUnsymbolized && block.StringTable[valIdx] == "true" {
  1003  					return true
  1004  				}
  1005  			}
  1006  		}
  1007  	}
  1008  	return false
  1009  }
  1010  
// mockBucket wraps an in-memory bucket and counts Upload attempts; its
// Upload override blocks until the caller's context is canceled, so tests
// can observe how many upload attempts were started.
type mockBucket struct {
	*memory.InMemBucket
	uploads atomic.Int64 // number of Upload calls observed
}
  1015  
// Upload records the attempt and then blocks until ctx is canceled,
// simulating an upload that never completes; it always returns ctx.Err().
func (m *mockBucket) Upload(ctx context.Context, _ string, _ io.Reader, _ ...objstore.ObjectUploadOption) error {
	m.uploads.Add(1)
	<-ctx.Done()
	return ctx.Err()
}
  1021  
  1022  func TestUploadBlock_HedgedUploadLimiter(t *testing.T) {
  1023  	t.Run("disabled", func(t *testing.T) {
  1024  		t.Parallel()
  1025  
  1026  		bucket := &mockBucket{InMemBucket: memory.NewInMemBucket()}
  1027  		logger := test.NewTestingLogger(t)
  1028  
  1029  		var config Config
  1030  		config.RegisterFlags(flag.NewFlagSet("test", flag.PanicOnError))
  1031  		config.UploadTimeout = time.Millisecond * 250
  1032  		config.UploadHedgeAfter = time.Millisecond
  1033  		config.UploadHedgeRateMax = 0
  1034  		config.UploadHedgeRateBurst = 0
  1035  		config.UploadMaxRetries = 0
  1036  
  1037  		sw := &segmentsWriter{
  1038  			config:              config,
  1039  			logger:              logger,
  1040  			bucket:              bucket,
  1041  			hedgedUploadLimiter: rate.NewLimiter(rate.Limit(config.UploadHedgeRateMax), int(config.UploadHedgeRateBurst)),
  1042  			metrics:             newSegmentMetrics(nil),
  1043  		}
  1044  
  1045  		err := sw.uploadBlock(context.Background(), nil, new(metastorev1.BlockMeta), new(segment))
  1046  		require.ErrorIs(t, err, context.DeadlineExceeded)
  1047  		require.Equal(t, int64(1), bucket.uploads.Load())
  1048  	})
  1049  
  1050  	t.Run("available", func(t *testing.T) {
  1051  		t.Parallel()
  1052  
  1053  		bucket := &mockBucket{InMemBucket: memory.NewInMemBucket()}
  1054  		logger := test.NewTestingLogger(t)
  1055  
  1056  		var config Config
  1057  		config.RegisterFlags(flag.NewFlagSet("test", flag.PanicOnError))
  1058  		config.UploadTimeout = time.Millisecond * 250
  1059  		config.UploadHedgeAfter = time.Millisecond
  1060  		config.UploadHedgeRateMax = 10
  1061  		config.UploadHedgeRateBurst = 10
  1062  		config.UploadMaxRetries = 0
  1063  
  1064  		sw := &segmentsWriter{
  1065  			config:              config,
  1066  			logger:              logger,
  1067  			bucket:              bucket,
  1068  			hedgedUploadLimiter: rate.NewLimiter(rate.Limit(config.UploadHedgeRateMax), int(config.UploadHedgeRateBurst)),
  1069  			metrics:             newSegmentMetrics(nil),
  1070  		}
  1071  
  1072  		// To avoid flakiness: there are no guarantees that the
  1073  		// hedged request is triggered before the upload timeout
  1074  		// expiration.
  1075  		hedgedRequestTriggered := func() bool {
  1076  			bucket.uploads.Store(0)
  1077  			err := sw.uploadBlock(context.Background(), nil, new(metastorev1.BlockMeta), new(segment))
  1078  			return errors.Is(err, context.DeadlineExceeded) && int64(2) == bucket.uploads.Load()
  1079  		}
  1080  
  1081  		require.Eventually(t, hedgedRequestTriggered, time.Second*10, time.Millisecond*50)
  1082  	})
  1083  
  1084  	t.Run("exhausted", func(t *testing.T) {
  1085  		t.Parallel()
  1086  
  1087  		bucket := &mockBucket{InMemBucket: memory.NewInMemBucket()}
  1088  		logger := test.NewTestingLogger(t)
  1089  
  1090  		var config Config
  1091  		config.RegisterFlags(flag.NewFlagSet("test", flag.PanicOnError))
  1092  		config.UploadTimeout = time.Millisecond * 250
  1093  		config.UploadHedgeAfter = time.Millisecond
  1094  		config.UploadHedgeRateMax = 0.1
  1095  		config.UploadHedgeRateBurst = 10
  1096  		config.UploadMaxRetries = 0
  1097  
  1098  		sw := &segmentsWriter{
  1099  			config:              config,
  1100  			logger:              logger,
  1101  			bucket:              bucket,
  1102  			hedgedUploadLimiter: rate.NewLimiter(rate.Limit(config.UploadHedgeRateMax), int(config.UploadHedgeRateBurst)),
  1103  			metrics:             newSegmentMetrics(nil),
  1104  		}
  1105  
  1106  		require.True(t, sw.hedgedUploadLimiter.ReserveN(time.Now(), int(config.UploadHedgeRateBurst)).OK())
  1107  		err := sw.uploadBlock(context.Background(), nil, new(metastorev1.BlockMeta), new(segment))
  1108  		require.ErrorIs(t, err, context.DeadlineExceeded)
  1109  		require.Equal(t, int64(1), bucket.uploads.Load())
  1110  	})
  1111  }