github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/integration.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/client"
	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	persistfs "github.com/m3db/m3/src/dbnode/persist/fs"
	"github.com/m3db/m3/src/dbnode/runtime"
	"github.com/m3db/m3/src/dbnode/storage"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/commitlog"
	bfs "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/peers"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/uninitialized"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
	"github.com/m3db/m3/src/dbnode/storage/index"
	"github.com/m3db/m3/src/dbnode/storage/index/compaction"
	"github.com/m3db/m3/src/dbnode/storage/repair"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/dbnode/topology/testutil"
	xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/m3ninx/index/segment/builder"
	"github.com/m3db/m3/src/m3ninx/index/segment/fst"
	idxpersist "github.com/m3db/m3/src/m3ninx/persist"
	"github.com/m3db/m3/src/x/instrument"
	xretry "github.com/m3db/m3/src/x/retry"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/require"
	"github.com/uber-go/tally"
	"go.uber.org/zap"
)

const (
	multiAddrPortStart = 9000
	multiAddrPortEach  = 5
)

// TODO: refactor and use m3x/clock ...
type conditionFn func() bool

func waitUntil(fn conditionFn, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if fn() {
			return true
		}
		time.Sleep(100 * time.Millisecond)
	}
	return false
}
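
// Hedged usage sketch (illustrative, not part of the original file): tests
// typically poll an asynchronous condition with waitUntil and assert on the
// result, e.g. assuming a TestSetup named setup whose database exposes
// IsBootstrapped:
//
//	bootstrapped := waitUntil(func() bool {
//		return setup.DB().IsBootstrapped()
//	}, time.Minute)
//	require.True(t, bootstrapped)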

func newMultiAddrTestOptions(opts TestOptions, instance int) TestOptions {
	bind := "127.0.0.1"
	start := multiAddrPortStart + (instance * multiAddrPortEach)
	return opts.
		SetID(fmt.Sprintf("testhost%d", instance)).
		SetTChannelNodeAddr(fmt.Sprintf("%s:%d", bind, start)).
		SetTChannelClusterAddr(fmt.Sprintf("%s:%d", bind, start+1)).
		SetHTTPNodeAddr(fmt.Sprintf("%s:%d", bind, start+2)).
		SetHTTPClusterAddr(fmt.Sprintf("%s:%d", bind, start+3)).
		SetHTTPDebugAddr(fmt.Sprintf("%s:%d", bind, start+4))
}
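
// Illustrative note (an inference from the constants above, not original
// code): each instance claims a contiguous block of five ports, so addresses
// never collide across instances:
//
//	opts0 := newMultiAddrTestOptions(opts, 0) // testhost0, 127.0.0.1:9000-9004
//	opts1 := newMultiAddrTestOptions(opts, 1) // testhost1, 127.0.0.1:9005-9009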

func newMultiAddrAdminClient(
	t *testing.T,
	adminOpts client.AdminOptions,
	topologyInitializer topology.Initializer,
	origin topology.Host,
	instrumentOpts instrument.Options,
	customOpts ...client.CustomAdminOption,
) client.AdminClient {
	if adminOpts == nil {
		adminOpts = client.NewAdminOptions()
	}

	adminOpts = adminOpts.
		SetOrigin(origin).
		SetInstrumentOptions(instrumentOpts).
		SetClusterConnectConsistencyLevel(topology.ConnectConsistencyLevelAny).
		SetTopologyInitializer(topologyInitializer).
		SetClusterConnectTimeout(time.Second).(client.AdminOptions)

	for _, o := range customOpts {
		adminOpts = o(adminOpts)
	}

	adminClient, err := client.NewAdminClient(adminOpts)
	require.NoError(t, err)

	return adminClient
}
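
// Hedged usage sketch: custom admin options run after the defaults above, so
// they can override any of them. The batch size below is an arbitrary
// illustrative value:
//
//	adminClient := newMultiAddrAdminClient(
//		t, nil, topologyInitializer, origin, instrumentOpts,
//		func(opts client.AdminOptions) client.AdminOptions {
//			return opts.SetFetchSeriesBlocksBatchSize(64)
//		})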

// BootstrappableTestSetupOptions defines options for test setups.
type BootstrappableTestSetupOptions struct {
	FinalBootstrapper                                   string
	BootstrapBlocksBatchSize                            int
	BootstrapBlocksConcurrency                          int
	BootstrapConsistencyLevel                           topology.ReadConsistencyLevel
	TopologyInitializer                                 topology.Initializer
	TestStatsReporter                                   xmetrics.TestStatsReporter
	DisableCommitLogBootstrapper                        bool
	DisablePeersBootstrapper                            bool
	UseTChannelClientForWriting                         bool
	EnableRepairs                                       bool
	ForceRepairs                                        bool
	RepairType                                          repair.Type
	AdminClientCustomOpts                               []client.CustomAdminOption
	ShardsLeavingAndInitializingCountTowardsConsistency bool
}
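
// Illustrative sketch (values are examples, not defaults): a common
// three-replica configuration disables the peers bootstrapper on the first
// node, which has no peers to stream from, and leaves it enabled on the rest:
//
//	setupOpts := []BootstrappableTestSetupOptions{
//		{DisablePeersBootstrapper: true},
//		{DisablePeersBootstrapper: false},
//		{DisablePeersBootstrapper: false},
//	}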

type closeFn func()

func newDefaultTestResultOptions(
	storageOpts storage.Options,
) result.Options {
	return result.NewOptions().
		SetClockOptions(storageOpts.ClockOptions()).
		SetInstrumentOptions(storageOpts.InstrumentOptions()).
		SetDatabaseBlockOptions(storageOpts.DatabaseBlockOptions()).
		SetSeriesCachePolicy(storageOpts.SeriesCachePolicy())
}

// NewDefaultBootstrappableTestSetups creates dbnode test setups.
func NewDefaultBootstrappableTestSetups( // nolint:gocyclo
	t *testing.T,
	opts TestOptions,
	setupOpts []BootstrappableTestSetupOptions,
) (testSetups, closeFn) {
	var (
		replicas        = len(setupOpts)
		setups          []TestSetup
		cleanupFns      []func()
		cleanupFnsMutex sync.RWMutex

		appendCleanupFn = func(fn func()) {
			cleanupFnsMutex.Lock()
			defer cleanupFnsMutex.Unlock()
			cleanupFns = append(cleanupFns, fn)
		}
	)

	shardSet, err := newTestShardSet(opts.NumShards(), opts.ShardSetOptions())
	require.NoError(t, err)
	for i := 0; i < replicas; i++ {
		var (
			instance                                            = i
			usingCommitLogBootstrapper                          = !setupOpts[i].DisableCommitLogBootstrapper
			usingPeersBootstrapper                              = !setupOpts[i].DisablePeersBootstrapper
			finalBootstrapperToUse                              = setupOpts[i].FinalBootstrapper
			useTChannelClientForWriting                         = setupOpts[i].UseTChannelClientForWriting
			bootstrapBlocksBatchSize                            = setupOpts[i].BootstrapBlocksBatchSize
			bootstrapBlocksConcurrency                          = setupOpts[i].BootstrapBlocksConcurrency
			bootstrapConsistencyLevel                           = setupOpts[i].BootstrapConsistencyLevel
			topologyInitializer                                 = setupOpts[i].TopologyInitializer
			testStatsReporter                                   = setupOpts[i].TestStatsReporter
			enableRepairs                                       = setupOpts[i].EnableRepairs
			forceRepairs                                        = setupOpts[i].ForceRepairs
			repairType                                          = setupOpts[i].RepairType
			origin                                              topology.Host
			instanceOpts                                        = newMultiAddrTestOptions(opts, instance)
			adminClientCustomOpts                               = setupOpts[i].AdminClientCustomOpts
			shardsLeavingAndInitializingCountTowardsConsistency = setupOpts[i].
				ShardsLeavingAndInitializingCountTowardsConsistency
		)

		if finalBootstrapperToUse == "" {
			finalBootstrapperToUse = bootstrapper.NoOpNoneBootstrapperName
		}

		if topologyInitializer == nil {
			// Set up a static topology initializer.
			var (
				start         = multiAddrPortStart
				hostShardSets []topology.HostShardSet
			)

			for i := 0; i < replicas; i++ {
				id := fmt.Sprintf("testhost%d", i)
				nodeAddr := fmt.Sprintf("127.0.0.1:%d", start+(i*multiAddrPortEach))
				host := topology.NewHost(id, nodeAddr)
				if i == instance {
					origin = host
				}
				hostShardSet := topology.NewHostShardSet(host, shardSet)
				hostShardSets = append(hostShardSets, hostShardSet)
			}

			staticOptions := topology.NewStaticOptions().
				SetShardSet(shardSet).
				SetReplicas(replicas).
				SetHostShardSets(hostShardSets)
			topologyInitializer = topology.NewStaticInitializer(staticOptions)
		}

		instanceOpts = instanceOpts.
			SetClusterDatabaseTopologyInitializer(topologyInitializer).
			SetUseTChannelClientForWriting(useTChannelClientForWriting).
			SetShardsLeavingAndInitializingCountTowardsConsistency(shardsLeavingAndInitializingCountTowardsConsistency)

		if i > 0 {
			// NB(bodu): Need to reset the global counter of index claim
			// manager instances after the initial node.
			persistfs.ResetIndexClaimsManagersUnsafe()
		}
		setup, err := NewTestSetup(t, instanceOpts, nil, opts.StorageOptsFn())
		require.NoError(t, err)
		topologyInitializer = setup.TopologyInitializer()

		instrumentOpts := setup.StorageOpts().InstrumentOptions()
		logger := instrumentOpts.Logger()
		logger = logger.With(zap.Int("instance", instance))
		instrumentOpts = instrumentOpts.SetLogger(logger)
		if testStatsReporter != nil {
			scope, _ := tally.NewRootScope(tally.ScopeOptions{Reporter: testStatsReporter}, 100*time.Millisecond)
			instrumentOpts = instrumentOpts.SetMetricsScope(scope)
		}
		setup.SetStorageOpts(setup.StorageOpts().SetInstrumentOptions(instrumentOpts))
		var (
			bsOpts            = newDefaultTestResultOptions(setup.StorageOpts())
			finalBootstrapper bootstrap.BootstrapperProvider

			adminOpts = client.NewAdminOptions().
					SetTopologyInitializer(topologyInitializer).(client.AdminOptions).
					SetOrigin(origin)

			// Prevent integration tests from timing out when a node is down
			retryOpts = xretry.NewOptions().
					SetInitialBackoff(1 * time.Millisecond).
					SetMaxRetries(1).
					SetJitter(true)
			retrier = xretry.NewRetrier(retryOpts)
		)

		switch finalBootstrapperToUse {
		case bootstrapper.NoOpAllBootstrapperName:
			finalBootstrapper = bootstrapper.NewNoOpAllBootstrapperProvider()
		case bootstrapper.NoOpNoneBootstrapperName:
			finalBootstrapper = bootstrapper.NewNoOpNoneBootstrapperProvider()
		case uninitialized.UninitializedTopologyBootstrapperName:
			finalBootstrapper = uninitialized.NewUninitializedTopologyBootstrapperProvider(
				uninitialized.NewOptions().
					SetInstrumentOptions(instrumentOpts), nil)
		default:
			panic(fmt.Sprintf(
				"Unknown final bootstrapper to use: %v", finalBootstrapperToUse))
		}

		if bootstrapBlocksBatchSize > 0 {
			adminOpts = adminOpts.SetFetchSeriesBlocksBatchSize(bootstrapBlocksBatchSize)
		}
		if bootstrapBlocksConcurrency > 0 {
			adminOpts = adminOpts.SetFetchSeriesBlocksBatchConcurrency(bootstrapBlocksConcurrency)
		}
		adminOpts = adminOpts.SetStreamBlocksRetrier(retrier)
		adminOpts = adminOpts.SetShardsLeavingAndInitializingCountTowardsConsistency(
			shardsLeavingAndInitializingCountTowardsConsistency).(client.AdminOptions)

		adminClient := newMultiAddrAdminClient(
			t, adminOpts, topologyInitializer, origin, instrumentOpts, adminClientCustomOpts...)
		setup.SetStorageOpts(setup.StorageOpts().SetAdminClient(adminClient))

		storageIdxOpts := setup.StorageOpts().IndexOptions()
		fsOpts := setup.StorageOpts().CommitLogOptions().FilesystemOptions()
		if usingPeersBootstrapper {
			var (
				runtimeOptsMgr = setup.StorageOpts().RuntimeOptionsManager()
				runtimeOpts    = runtimeOptsMgr.Get().
						SetClientBootstrapConsistencyLevel(bootstrapConsistencyLevel)
			)
			runtimeOptsMgr.Update(runtimeOpts)

			peersOpts := peers.NewOptions().
				SetResultOptions(bsOpts).
				SetAdminClient(adminClient).
				SetIndexOptions(storageIdxOpts).
				SetFilesystemOptions(fsOpts).
				// PersistManager needs to be set or we will never execute
				// the persist bootstrapping path.
				SetPersistManager(setup.StorageOpts().PersistManager()).
				SetIndexClaimsManager(setup.StorageOpts().IndexClaimsManager()).
				SetCompactor(newCompactor(t, storageIdxOpts)).
				SetRuntimeOptionsManager(runtimeOptsMgr).
				SetContextPool(setup.StorageOpts().ContextPool())

			finalBootstrapper, err = peers.NewPeersBootstrapperProvider(peersOpts, finalBootstrapper)
			require.NoError(t, err)
		}

		if usingCommitLogBootstrapper {
			bootstrapCommitlogOpts := commitlog.NewOptions().
				SetResultOptions(bsOpts).
				SetCommitLogOptions(setup.StorageOpts().CommitLogOptions()).
				SetRuntimeOptionsManager(runtime.NewOptionsManager())

			finalBootstrapper, err = commitlog.NewCommitLogBootstrapperProvider(bootstrapCommitlogOpts,
				mustInspectFilesystem(fsOpts), finalBootstrapper)
			require.NoError(t, err)
		}

		persistMgr, err := persistfs.NewPersistManager(fsOpts)
		require.NoError(t, err)

		bfsOpts := bfs.NewOptions().
			SetResultOptions(bsOpts).
			SetFilesystemOptions(fsOpts).
			SetIndexOptions(storageIdxOpts).
			SetCompactor(newCompactor(t, storageIdxOpts)).
			SetPersistManager(persistMgr).
			SetIndexClaimsManager(setup.StorageOpts().IndexClaimsManager())

		fsBootstrapper, err := bfs.NewFileSystemBootstrapperProvider(bfsOpts, finalBootstrapper)
		require.NoError(t, err)

		processOpts := bootstrap.NewProcessOptions().
			SetTopologyMapProvider(setup).
			SetOrigin(setup.Origin())
		provider, err := bootstrap.NewProcessProvider(fsBootstrapper, processOpts, bsOpts, fsOpts)
		require.NoError(t, err)

		setup.SetStorageOpts(setup.StorageOpts().SetBootstrapProcessProvider(provider))

		if enableRepairs {
			setup.SetStorageOpts(setup.StorageOpts().
				SetRepairEnabled(true).
				SetRepairOptions(
					setup.StorageOpts().RepairOptions().
						SetType(repairType).
						SetForce(forceRepairs).
						SetRepairThrottle(time.Millisecond).
						SetRepairCheckInterval(time.Millisecond).
						SetAdminClients([]client.AdminClient{adminClient}).
						SetDebugShadowComparisonsPercentage(1.0).
						// Avoid log spam.
						SetDebugShadowComparisonsEnabled(false)))
		}

		setups = append(setups, setup)
		appendCleanupFn(func() {
			setup.Close()
		})
	}

	return setups, func() {
		cleanupFnsMutex.RLock()
		defer cleanupFnsMutex.RUnlock()
		for _, fn := range cleanupFns {
			fn()
		}
	}
}
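
// Hedged usage sketch: a bootstrap test builds the setups, defers the
// combined cleanup, and starts each server; opts and setupOpts are assumed
// to be prepared by the caller:
//
//	setups, closeAll := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
//	defer closeAll()
//	for _, s := range setups {
//		require.NoError(t, s.StartServer())
//	}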

func writeTestDataToDiskWithIndex(
	metadata namespace.Metadata,
	s TestSetup,
	seriesMaps generate.SeriesBlocksByStart,
) error {
	if err := writeTestDataToDisk(metadata, s, seriesMaps, 0); err != nil {
		return err
	}
	for blockStart, series := range seriesMaps {
		docs := generate.ToDocMetadata(series)
		if err := writeTestIndexDataToDisk(
			metadata,
			s.StorageOpts(),
			idxpersist.DefaultIndexVolumeType,
			blockStart,
			s.ShardSet().AllIDs(),
			docs,
		); err != nil {
			return err
		}
	}
	return nil
}
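
// Hedged usage sketch: seeding both data and index filesets before starting
// a node, so the filesystem bootstrapper has something to recover (md, setup
// and seriesMaps assumed prepared by the test):
//
//	require.NoError(t, writeTestDataToDiskWithIndex(md, setup, seriesMaps))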

func writeTestDataToDisk(
	metadata namespace.Metadata,
	setup TestSetup,
	seriesMaps generate.SeriesBlocksByStart,
	volume int,
	generatorOptionsFns ...func(generate.Options) generate.Options,
) error {
	ropts := metadata.Options().RetentionOptions()
	gOpts := setup.GeneratorOptions(ropts)
	for _, fn := range generatorOptionsFns {
		gOpts = fn(gOpts)
	}
	writer := generate.NewWriter(gOpts)
	return writer.WriteData(namespace.NewContextFrom(metadata), setup.ShardSet(), seriesMaps, volume)
}
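
// Hedged usage sketch: the variadic option functions customize the generator
// per call; SetWriteEmptyShards is assumed here to be an existing
// generate.Options setter:
//
//	err := writeTestDataToDisk(md, setup, seriesMaps, 0,
//		func(opts generate.Options) generate.Options {
//			return opts.SetWriteEmptyShards(false)
//		})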

func writeTestSnapshotsToDiskWithPredicate(
	metadata namespace.Metadata,
	setup TestSetup,
	seriesMaps generate.SeriesBlocksByStart,
	volume int,
	pred generate.WriteDatapointPredicate,
	snapshotInterval time.Duration,
) error {
	ropts := metadata.Options().RetentionOptions()
	writer := generate.NewWriter(setup.GeneratorOptions(ropts))
	return writer.WriteSnapshotWithPredicate(
		namespace.NewContextFrom(metadata), setup.ShardSet(), seriesMaps, volume, pred, snapshotInterval)
}
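
// Hedged usage sketch, assuming the generate package exposes a
// write-everything predicate (generate.WriteAllPredicate):
//
//	err := writeTestSnapshotsToDiskWithPredicate(
//		md, setup, seriesMaps, 0, generate.WriteAllPredicate, time.Minute)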

func concatShards(a, b shard.Shards) shard.Shards {
	all := append(a.All(), b.All()...)
	return shard.NewShards(all)
}

func newClusterShardsRange(from, to uint32, s shard.State) shard.Shards {
	return shard.NewShards(testutil.ShardsRange(from, to, s))
}

func newClusterEmptyShardsRange() shard.Shards {
	return shard.NewShards(testutil.Shards(nil, shard.Available))
}

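// waitUntilHasBootstrappedShardsExactly blocks until the database reports
// exactly the given shards as bootstrapped in every namespace, polling once
// per second. It never times out on its own; callers rely on the enclosing
// test's timeout.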
func waitUntilHasBootstrappedShardsExactly(
	db storage.Database,
	shards []uint32,
) {
	for {
		if hasBootstrappedShardsExactly(db, shards) {
			return
		}
		time.Sleep(time.Second)
	}
}

func hasBootstrappedShardsExactly(
	db storage.Database,
	shards []uint32,
) bool {
	for _, namespace := range db.Namespaces() {
		expect := make(map[uint32]struct{})
		pending := make(map[uint32]struct{})
		for _, shard := range shards {
			expect[shard] = struct{}{}
			pending[shard] = struct{}{}
		}

		for _, s := range namespace.Shards() {
			if _, ok := expect[s.ID()]; !ok {
				// Not expecting shard
				return false
			}
			if s.IsBootstrapped() {
				delete(pending, s.ID())
			}
		}

		if len(pending) != 0 {
			// Not all shards bootstrapped
			return false
		}
	}

	return true
}

func newCompactor(
	t *testing.T,
	opts index.Options,
) *compaction.Compactor {
	compactor, err := newCompactorWithErr(opts)
	require.NoError(t, err)
	return compactor
}

func newCompactorWithErr(opts index.Options) (*compaction.Compactor, error) {
	return compaction.NewCompactor(opts.MetadataArrayPool(),
		index.MetadataArrayPoolCapacity,
		opts.SegmentBuilderOptions(),
		opts.FSTSegmentOptions(),
		compaction.CompactorOptions{
			FSTWriterOptions: &fst.WriterOptions{
				// DisableRegistry is set to true to trade a larger FST size
				// for faster FST compaction, since we want to reduce the
				// end-to-end latency to first index a metric.
				DisableRegistry: true,
			},
		})
}

func writeTestIndexDataToDisk(
	md namespace.Metadata,
	storageOpts storage.Options,
	indexVolumeType idxpersist.IndexVolumeType,
	blockStart xtime.UnixNano,
	shards []uint32,
	docs []doc.Metadata,
) error {
	blockSize := md.Options().IndexOptions().BlockSize()
	fsOpts := storageOpts.CommitLogOptions().FilesystemOptions()
	writer, err := persistfs.NewIndexWriter(fsOpts)
	if err != nil {
		return err
	}
	segmentWriter, err := idxpersist.NewMutableSegmentFileSetWriter(fst.WriterOptions{})
	if err != nil {
		return err
	}

	shardsMap := make(map[uint32]struct{})
	for _, shard := range shards {
		shardsMap[shard] = struct{}{}
	}
	volumeIndex, err := persistfs.NextIndexFileSetVolumeIndex(
		fsOpts.FilePathPrefix(),
		md.ID(),
		blockStart,
	)
	if err != nil {
		return err
	}
	writerOpts := persistfs.IndexWriterOpenOptions{
		Identifier: persistfs.FileSetFileIdentifier{
			Namespace:   md.ID(),
			BlockStart:  blockStart,
			VolumeIndex: volumeIndex,
		},
		BlockSize:       blockSize,
		Shards:          shardsMap,
		IndexVolumeType: indexVolumeType,
	}
	if err := writer.Open(writerOpts); err != nil {
		return err
	}

	builder, err := builder.NewBuilderFromDocuments(builder.NewOptions())
	if err != nil {
		return err
	}
	for _, doc := range docs {
		_, err = builder.Insert(doc)
		if err != nil {
			return err
		}
	}

	if err := segmentWriter.Reset(builder); err != nil {
		return err
	}
	if err := writer.WriteSegmentFileSet(segmentWriter); err != nil {
		return err
	}
	if err := builder.Close(); err != nil {
		return err
	}
	return writer.Close()
}
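
// Hedged usage sketch: seeding a single index block directly, mirroring how
// writeTestDataToDiskWithIndex above calls this helper; the shard IDs and
// block start are illustrative:
//
//	err := writeTestIndexDataToDisk(md, setup.StorageOpts(),
//		idxpersist.DefaultIndexVolumeType, blockStart,
//		[]uint32{0, 1, 2, 3}, docs)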