github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/persist/fs/types.go

// Copyright (c) 2016 Uber Technologies, Inc
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE

package fs

import (
	"errors"
	"io"
	"os"
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/persist"
	"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
	"github.com/m3db/m3/src/dbnode/runtime"
	"github.com/m3db/m3/src/dbnode/sharding"
	"github.com/m3db/m3/src/dbnode/storage/block"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
	"github.com/m3db/m3/src/dbnode/storage/limits"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/x/xio"
	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/m3ninx/index/segment/fst"
	idxpersist "github.com/m3db/m3/src/m3ninx/persist"
	"github.com/m3db/m3/src/x/checked"
	"github.com/m3db/m3/src/x/clock"
	"github.com/m3db/m3/src/x/context"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/instrument"
	"github.com/m3db/m3/src/x/mmap"
	"github.com/m3db/m3/src/x/pool"
	"github.com/m3db/m3/src/x/serialize"
	xtime "github.com/m3db/m3/src/x/time"
)

// ErrIndexOutOfRetention is returned when reserving an index block or volume claim that is out of retention.
var ErrIndexOutOfRetention = errors.New("out of retention index")

// FileSetFileIdentifier contains all the information required to identify a FileSetFile.
type FileSetFileIdentifier struct {
	FileSetContentType persist.FileSetContentType
	Namespace          ident.ID
	BlockStart         xtime.UnixNano
	// Only required for data content files
	Shard uint32
	// Required for both snapshot files and flush files (index and data)
	VolumeIndex int
}

// DataWriterOpenOptions is the options struct for the Open method on the DataFileSetWriter.
type DataWriterOpenOptions struct {
	FileSetType        persist.FileSetType
	FileSetContentType persist.FileSetContentType
	Identifier         FileSetFileIdentifier
	BlockSize          time.Duration
	// Only used when writing snapshot files
	Snapshot DataWriterSnapshotOptions
}

// DataWriterSnapshotOptions is the options struct for the Open method on the DataFileSetWriter
// that contains information specific to writing snapshot files.
type DataWriterSnapshotOptions struct {
	SnapshotTime xtime.UnixNano
	SnapshotID   []byte
}

// DataFileSetWriter provides an unsynchronized writer for a TSDB file set.
type DataFileSetWriter interface {
	io.Closer

	// Open opens the files for writing data to the given shard in the given namespace.
	// This method is not thread-safe, so it is the caller's responsibility to never
	// write two snapshot files for the same block start at the same time, otherwise
	// there will be a race in determining the snapshot file's index.
	Open(opts DataWriterOpenOptions) error

	// Write will write the id and data pair and returns an error on a write error. Callers
	// must not call this method with a given ID more than once.
	Write(metadata persist.Metadata, data checked.Bytes, checksum uint32) error

	// WriteAll will write the id and all byte slices and returns an error on a write error.
	// Callers must not call this method with a given ID more than once.
	WriteAll(metadata persist.Metadata, data []checked.Bytes, checksum uint32) error

	// DeferClose returns a DataCloser that defers writing of a checkpoint file.
	DeferClose() (persist.DataCloser, error)
}
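
// A minimal write-flow sketch (illustrative only; the writeOne name and its
// parameters are hypothetical, and the writer, metadata, payload and checksum
// are assumed to be supplied by the caller):
//
//	func writeOne(w DataFileSetWriter, ns ident.ID, shard uint32,
//		blockStart xtime.UnixNano, blockSize time.Duration,
//		meta persist.Metadata, data checked.Bytes, checksum uint32) error {
//		opts := DataWriterOpenOptions{
//			FileSetType:        persist.FileSetFlushType,
//			FileSetContentType: persist.FileSetDataContentType,
//			BlockSize:          blockSize,
//			Identifier: FileSetFileIdentifier{
//				Namespace:  ns,
//				Shard:      shard,
//				BlockStart: blockStart,
//			},
//		}
//		if err := w.Open(opts); err != nil {
//			return err
//		}
//		if err := w.Write(meta, data, checksum); err != nil {
//			w.Close()
//			return err
//		}
//		return w.Close()
//	}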

// SnapshotMetadataFileWriter writes out snapshot metadata files.
type SnapshotMetadataFileWriter interface {
	Write(args SnapshotMetadataWriteArgs) error
}

// SnapshotMetadataFileReader reads snapshot metadata files.
type SnapshotMetadataFileReader interface {
	Read(id SnapshotMetadataIdentifier) (SnapshotMetadata, error)
}

// DataFileSetReaderStatus describes the status of a file set reader.
type DataFileSetReaderStatus struct {
	Namespace  ident.ID
	BlockStart xtime.UnixNano
	Shard      uint32
	Volume     int
	Open       bool
	BlockSize  time.Duration
}

// DataReaderOpenOptions is the options struct for the reader open method.
type DataReaderOpenOptions struct {
	// Identifier identifies a FileSetFile.
	Identifier FileSetFileIdentifier
	// FileSetType is the file set type.
	FileSetType persist.FileSetType
	// StreamingEnabled enables using streaming methods, such as
	// DataFileSetReader.StreamingRead and DataFileSetReader.StreamingReadMetadata.
	StreamingEnabled bool
}

// DataFileSetReader provides an unsynchronized reader for a TSDB file set.
type DataFileSetReader interface {
	io.Closer

	// Open opens the files for the given shard and version for reading.
	Open(opts DataReaderOpenOptions) error

	// Status returns the status of the reader.
	Status() DataFileSetReaderStatus

	// StreamingRead returns the next unpooled id, encodedTags, data, checksum
	// values ordered by id, or an error; it returns io.EOF at the end of the volume.
	// Can only be used when DataReaderOpenOptions.StreamingEnabled is true.
	// Use either StreamingRead or StreamingReadMetadata to progress through a volume, but not both.
	// Note: the returned data gets invalidated on the next call to StreamingRead.
	StreamingRead() (StreamedDataEntry, error)

	// StreamingReadMetadata returns the next unpooled id, encodedTags, length, checksum
	// values ordered by id, or an error; it returns io.EOF at the end of the volume.
	// Can only be used when DataReaderOpenOptions.StreamingEnabled is true.
	// Use either StreamingRead or StreamingReadMetadata to progress through a volume, but not both.
	// Note: the returned data gets invalidated on the next call to StreamingReadMetadata.
	StreamingReadMetadata() (StreamedMetadataEntry, error)

	// Read returns the next id, tags, data, checksum tuple or an error;
	// it returns io.EOF at the end of the volume.
	// Use either Read or ReadMetadata to progress through a volume, but not both.
	// Note: make sure to finalize the ID, close the Tags and finalize the Data when done with
	// them so they can be returned to their respective pools.
	Read() (id ident.ID, tags ident.TagIterator, data checked.Bytes, checksum uint32, err error)

	// ReadMetadata returns the next id and metadata or an error; it returns io.EOF at the end of the volume.
	// Use either Read or ReadMetadata to progress through a volume, but not both.
	// Note: make sure to finalize the ID, and close the Tags when done with them so they can
	// be returned to their respective pools.
	ReadMetadata() (id ident.ID, tags ident.TagIterator, length int, checksum uint32, err error)

	// ReadBloomFilter returns the bloom filter stored on disk in a container object that is safe
	// for concurrent use and has a Close() method for releasing resources when done.
	ReadBloomFilter() (*ManagedConcurrentBloomFilter, error)

	// Validate validates both the metadata and data and returns an error if either is corrupted.
	Validate() error

	// ValidateMetadata validates the metadata and returns an error if the metadata is corrupted.
	ValidateMetadata() error

	// ValidateData validates the data and returns an error if the data is corrupted.
	ValidateData() error

	// Range returns the time range associated with data in the volume.
	Range() xtime.Range

	// Entries returns the count of entries in the volume.
	Entries() int

	// EntriesRead returns the position read into the volume.
	EntriesRead() int

	// MetadataRead returns the position of metadata read into the volume.
	MetadataRead() int

	// StreamingEnabled returns true if the reader is opened in streaming mode.
	StreamingEnabled() bool
}
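
// A minimal read-loop sketch (illustrative only; the readAll name and the
// caller-supplied process callback are hypothetical). It follows the Read
// contract above: iterate until io.EOF and return pooled resources when done:
//
//	func readAll(r DataFileSetReader, opts DataReaderOpenOptions,
//		process func(id ident.ID, tags ident.TagIterator, data checked.Bytes, checksum uint32) error) error {
//		if err := r.Open(opts); err != nil {
//			return err
//		}
//		defer r.Close()
//		for {
//			id, tags, data, checksum, err := r.Read()
//			if err == io.EOF {
//				return nil
//			}
//			if err != nil {
//				return err
//			}
//			procErr := process(id, tags, data, checksum)
//			// Return resources to their respective pools once done with them.
//			id.Finalize()
//			tags.Close()
//			data.Finalize()
//			if procErr != nil {
//				return procErr
//			}
//		}
//	}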

// DataFileSetSeeker provides an out of order reader for a TSDB file set.
type DataFileSetSeeker interface {
	io.Closer

	// Open opens the files for the given shard and version for reading.
	Open(
		namespace ident.ID,
		shard uint32,
		start xtime.UnixNano,
		volume int,
		resources ReusableSeekerResources,
	) error

	// SeekByID returns the data for the specified ID provided the index was loaded upon open. An
	// error will be returned if the index was not loaded or the ID cannot be found.
	SeekByID(id ident.ID, resources ReusableSeekerResources) (data checked.Bytes, err error)

	// SeekByIndexEntry is similar to Seek, but uses an IndexEntry instead of
	// looking it up on its own. Useful in cases where you've already obtained an
	// entry and don't want to waste resources looking it up again.
	SeekByIndexEntry(entry IndexEntry, resources ReusableSeekerResources) (checked.Bytes, error)

	// SeekIndexEntry returns the IndexEntry for the specified ID. This can be useful
	// ahead of issuing a number of seek requests so that the seek requests can be
	// made in order. The returned IndexEntry can also be passed to SeekByIndexEntry
	// to prevent duplicate index lookups.
	SeekIndexEntry(id ident.ID, resources ReusableSeekerResources) (IndexEntry, error)

	// Range returns the time range associated with data in the volume.
	Range() xtime.Range

	// ConcurrentIDBloomFilter returns a concurrency-safe bloom filter that can
	// be used to quickly disqualify IDs that definitely do not exist, i.e. if the
	// Test() method returns true, the ID may exist on disk, but if it returns
	// false, it definitely does not.
	ConcurrentIDBloomFilter() *ManagedConcurrentBloomFilter

	// ConcurrentClone clones a seeker, creating a copy that uses the same underlying resources
	// (mmaps), but that is capable of seeking independently. The original can continue
	// to be used after the clones are closed, but the clones cannot be used after the
	// original is closed.
	ConcurrentClone() (ConcurrentDataFileSetSeeker, error)
}
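
// A minimal seek sketch (illustrative only; the seekOne name is hypothetical,
// and it assumes the bloom filter's Test method accepts the raw ID bytes). It
// shows the bloom filter pre-check and the two-step
// SeekIndexEntry/SeekByIndexEntry lookup described above:
//
//	func seekOne(s DataFileSetSeeker, id ident.ID, res ReusableSeekerResources) (checked.Bytes, error) {
//		// Cheap negative check: if Test returns false the ID is definitely absent.
//		if !s.ConcurrentIDBloomFilter().Test(id.Bytes()) {
//			return nil, nil
//		}
//		entry, err := s.SeekIndexEntry(id, res)
//		if err != nil {
//			return nil, err
//		}
//		// Reuse the entry to avoid a duplicate index lookup.
//		return s.SeekByIndexEntry(entry, res)
//	}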

// ConcurrentDataFileSetSeeker is a limited interface that is returned when ConcurrentClone() is called
// on DataFileSetSeeker. A seeker is essentially a wrapper around file
// descriptors for a set of files, allowing for interaction with them.
// We can ask a seeker for a specific time series, which will then be streamed
// out from the corresponding data file.
// The clones can be used together concurrently and share underlying resources.
// Clones are no longer usable once the original has been closed.
type ConcurrentDataFileSetSeeker interface {
	io.Closer

	// SeekByID is the same as in DataFileSetSeeker.
	SeekByID(id ident.ID, resources ReusableSeekerResources) (data checked.Bytes, err error)

	// SeekByIndexEntry is the same as in DataFileSetSeeker.
	SeekByIndexEntry(entry IndexEntry, resources ReusableSeekerResources) (checked.Bytes, error)

	// SeekIndexEntry is the same as in DataFileSetSeeker.
	SeekIndexEntry(id ident.ID, resources ReusableSeekerResources) (IndexEntry, error)

	// ConcurrentIDBloomFilter is the same as in DataFileSetSeeker.
	ConcurrentIDBloomFilter() *ManagedConcurrentBloomFilter
}

// DataFileSetSeekerManager provides management of seekers for a TSDB namespace.
type DataFileSetSeekerManager interface {
	io.Closer

	// Open opens the seekers for a given namespace.
	Open(
		md namespace.Metadata,
		shardSet sharding.ShardSet,
	) error

	// CacheShardIndices will pre-parse the indexes for given shards
	// to improve times when seeking to a block.
	CacheShardIndices(shards []uint32) error

	// AssignShardSet assigns the current per-namespace shard set.
	AssignShardSet(shardSet sharding.ShardSet)

	// Borrow returns an open seeker for a given shard, block start time, and
	// volume.
	Borrow(shard uint32, start xtime.UnixNano) (ConcurrentDataFileSetSeeker, error)

	// Return returns (closes) an open seeker for a given shard, block start
	// time, and volume.
	Return(shard uint32, start xtime.UnixNano, seeker ConcurrentDataFileSetSeeker) error

	// Test checks if an ID exists in a concurrent ID bloom filter for a
	// given shard, block start time, and volume.
	Test(id ident.ID, shard uint32, start xtime.UnixNano) (bool, error)
}
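
// A minimal borrow/return sketch (illustrative only; the fetch name is
// hypothetical, and it assumes an already-opened seeker manager and
// caller-provided reusable seeker resources):
//
//	func fetch(mgr DataFileSetSeekerManager, shard uint32, start xtime.UnixNano,
//		id ident.ID, res ReusableSeekerResources) (checked.Bytes, error) {
//		// Quick existence check before borrowing a seeker.
//		if ok, err := mgr.Test(id, shard, start); err != nil || !ok {
//			return nil, err
//		}
//		seeker, err := mgr.Borrow(shard, start)
//		if err != nil {
//			return nil, err
//		}
//		data, seekErr := seeker.SeekByID(id, res)
//		// Always return the borrowed seeker, even on a seek error.
//		if err := mgr.Return(shard, start, seeker); err != nil {
//			return nil, err
//		}
//		return data, seekErr
//	}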

// DataBlockRetriever provides a block retriever for TSDB file sets.
type DataBlockRetriever interface {
	io.Closer
	block.DatabaseBlockRetriever

	// Open the block retriever to retrieve from a namespace.
	Open(
		md namespace.Metadata,
		shardSet sharding.ShardSet,
	) error
}

// RetrievableDataBlockSegmentReader is a retrievable block reader.
type RetrievableDataBlockSegmentReader interface {
	xio.SegmentReader
}

// IndexWriterSnapshotOptions is a set of options for writing an index file set snapshot.
type IndexWriterSnapshotOptions struct {
	SnapshotTime xtime.UnixNano
}

// IndexWriterOpenOptions is a set of options when opening an index file set writer.
type IndexWriterOpenOptions struct {
	Identifier      FileSetFileIdentifier
	BlockSize       time.Duration
	FileSetType     persist.FileSetType
	Shards          map[uint32]struct{}
	IndexVolumeType idxpersist.IndexVolumeType

	// Only used when writing snapshot files.
	Snapshot IndexWriterSnapshotOptions
}

// IndexFileSetWriter is an index file set writer.
type IndexFileSetWriter interface {
	idxpersist.IndexFileSetWriter
	io.Closer

	// Open the index file set writer.
	Open(opts IndexWriterOpenOptions) error
}

// IndexSegmentFileSetWriter is an index segment file set writer.
type IndexSegmentFileSetWriter interface {
	idxpersist.IndexSegmentFileSetWriter
}

// IndexSegmentFileSet is an index segment file set.
type IndexSegmentFileSet interface {
	idxpersist.IndexSegmentFileSet
}

// IndexSegmentFile is a file in an index segment file set.
type IndexSegmentFile interface {
	idxpersist.IndexSegmentFileSet
}

// IndexReaderOpenOptions is the index file set reader open options.
type IndexReaderOpenOptions struct {
	Identifier  FileSetFileIdentifier
	FileSetType persist.FileSetType
}

// IndexReaderOpenResult describes the results of opening an
// index file set volume.
type IndexReaderOpenResult struct {
	Shards map[uint32]struct{}
}

// IndexFileSetReader is an index file set reader.
type IndexFileSetReader interface {
	idxpersist.IndexFileSetReader
	io.Closer

	// Open the index file set reader.
	Open(opts IndexReaderOpenOptions) (IndexReaderOpenResult, error)

	// Validate returns whether all checksums matched as expected. It must be
	// called after reading all the segment file sets, otherwise it returns an error.
	Validate() error
}
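
// A minimal index read sketch (illustrative only; the readIndexVolume name is
// hypothetical): open a volume, consume its segment file sets through the
// embedded idxpersist.IndexFileSetReader, and only then verify checksums.
//
//	func readIndexVolume(r IndexFileSetReader, opts IndexReaderOpenOptions) error {
//		if _, err := r.Open(opts); err != nil {
//			return err
//		}
//		defer r.Close()
//
//		// ... read every segment file set via the embedded
//		// idxpersist.IndexFileSetReader here ...
//
//		// Validate only succeeds once all segment file sets have been read.
//		return r.Validate()
//	}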

// Options represents the options for filesystem persistence.
type Options interface {
	// Validate will validate the options and return an error if not valid.
	Validate() error

	// SetClockOptions sets the clock options.
	SetClockOptions(value clock.Options) Options

	// ClockOptions returns the clock options.
	ClockOptions() clock.Options

	// SetInstrumentOptions sets the instrumentation options.
	SetInstrumentOptions(value instrument.Options) Options

	// InstrumentOptions returns the instrumentation options.
	InstrumentOptions() instrument.Options

	// SetRuntimeOptionsManager sets the runtime options manager.
	SetRuntimeOptionsManager(value runtime.OptionsManager) Options

	// RuntimeOptionsManager returns the runtime options manager.
	RuntimeOptionsManager() runtime.OptionsManager

	// SetDecodingOptions sets the decoding options.
	SetDecodingOptions(value msgpack.DecodingOptions) Options

	// DecodingOptions returns the decoding options.
	DecodingOptions() msgpack.DecodingOptions

	// SetFilePathPrefix sets the file path prefix for sharded TSDB files.
	SetFilePathPrefix(value string) Options

	// FilePathPrefix returns the file path prefix for sharded TSDB files.
	FilePathPrefix() string

	// SetNewFileMode sets the new file mode.
	SetNewFileMode(value os.FileMode) Options

	// NewFileMode returns the new file mode.
	NewFileMode() os.FileMode

	// SetNewDirectoryMode sets the new directory mode.
	SetNewDirectoryMode(value os.FileMode) Options

	// NewDirectoryMode returns the new directory mode.
	NewDirectoryMode() os.FileMode

	// SetIndexSummariesPercent sets the percent of index summaries to write.
	SetIndexSummariesPercent(value float64) Options

	// IndexSummariesPercent returns the percent of index summaries to write.
	IndexSummariesPercent() float64

	// SetIndexBloomFilterFalsePositivePercent sets the false positive percent
	// to use for the index bloom filter size and k hashes estimation.
	SetIndexBloomFilterFalsePositivePercent(value float64) Options

	// IndexBloomFilterFalsePositivePercent returns the false positive percent
	// to use for the index bloom filter size and k hashes estimation.
	IndexBloomFilterFalsePositivePercent() float64

	// SetForceIndexSummariesMmapMemory sets whether the summaries files will be mmap'd
	// as an anonymous region, or as a file.
	SetForceIndexSummariesMmapMemory(value bool) Options

	// ForceIndexSummariesMmapMemory returns whether the summaries files will be mmap'd
	// as an anonymous region, or as a file.
	ForceIndexSummariesMmapMemory() bool

	// SetForceBloomFilterMmapMemory sets whether the bloom filters will be mmap'd
	// as an anonymous region, or as a file.
	SetForceBloomFilterMmapMemory(value bool) Options

	// ForceBloomFilterMmapMemory returns whether the bloom filters will be mmap'd
	// as an anonymous region, or as a file.
	ForceBloomFilterMmapMemory() bool

	// SetWriterBufferSize sets the buffer size for writing TSDB files.
	SetWriterBufferSize(value int) Options

	// WriterBufferSize returns the buffer size for writing TSDB files.
	WriterBufferSize() int

	// SetInfoReaderBufferSize sets the buffer size for reading TSDB info,
	// digest and checkpoint files.
	SetInfoReaderBufferSize(value int) Options

	// InfoReaderBufferSize returns the buffer size for reading TSDB info,
	// digest and checkpoint files.
	InfoReaderBufferSize() int

	// SetDataReaderBufferSize sets the buffer size for reading TSDB data and index files.
	SetDataReaderBufferSize(value int) Options

	// DataReaderBufferSize returns the buffer size for reading TSDB data and index files.
	DataReaderBufferSize() int

	// SetSeekReaderBufferSize sets the buffer size for seeking TSDB files.
	SetSeekReaderBufferSize(value int) Options

	// SeekReaderBufferSize returns the buffer size for seeking TSDB files.
	SeekReaderBufferSize() int

	// SetMmapEnableHugeTLB sets whether mmap huge pages are enabled when running on Linux.
	SetMmapEnableHugeTLB(value bool) Options

	// MmapEnableHugeTLB returns whether mmap huge pages are enabled when running on Linux.
	MmapEnableHugeTLB() bool

	// SetMmapHugeTLBThreshold sets the threshold at which to use mmap huge pages for mmap'd files on Linux.
	SetMmapHugeTLBThreshold(value int64) Options

	// MmapHugeTLBThreshold returns the threshold at which to use mmap huge pages for mmap'd files on Linux.
	MmapHugeTLBThreshold() int64

	// SetTagEncoderPool sets the tag encoder pool.
	SetTagEncoderPool(value serialize.TagEncoderPool) Options

	// TagEncoderPool returns the tag encoder pool.
	TagEncoderPool() serialize.TagEncoderPool

	// SetTagDecoderPool sets the tag decoder pool.
	SetTagDecoderPool(value serialize.TagDecoderPool) Options

	// TagDecoderPool returns the tag decoder pool.
	TagDecoderPool() serialize.TagDecoderPool

	// SetFSTOptions sets the fst options.
	SetFSTOptions(value fst.Options) Options

	// FSTOptions returns the fst options.
	FSTOptions() fst.Options

	// SetFSTWriterOptions sets the fst writer options.
	SetFSTWriterOptions(value fst.WriterOptions) Options

	// FSTWriterOptions returns the fst writer options.
	FSTWriterOptions() fst.WriterOptions

	// SetMmapReporter sets the mmap reporter.
	SetMmapReporter(value mmap.Reporter) Options

	// MmapReporter returns the mmap reporter.
	MmapReporter() mmap.Reporter

	// SetIndexReaderAutovalidateIndexSegments sets whether the index reader
	// should autovalidate index segment data integrity on file open.
	SetIndexReaderAutovalidateIndexSegments(value bool) Options

	// IndexReaderAutovalidateIndexSegments returns whether the index reader
	// autovalidates index segment data integrity on file open.
	IndexReaderAutovalidateIndexSegments() bool

	// SetEncodingOptions sets the encoder options used by the encoder.
	SetEncodingOptions(value msgpack.LegacyEncodingOptions) Options

	// EncodingOptions returns the encoder options used by the encoder.
	EncodingOptions() msgpack.LegacyEncodingOptions
}
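
// A minimal configuration sketch (illustrative only; the configure name is
// hypothetical, and it assumes an Options value obtained from this package's
// constructor, commonly NewOptions):
//
//	func configure(opts Options, iopts instrument.Options, pathPrefix string) (Options, error) {
//		opts = opts.
//			SetInstrumentOptions(iopts).
//			SetFilePathPrefix(pathPrefix).
//			SetWriterBufferSize(1 << 16).
//			SetMmapEnableHugeTLB(true)
//		if err := opts.Validate(); err != nil {
//			return nil, err
//		}
//		return opts, nil
//	}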

// BlockRetrieverOptions represents the options for block retrieval.
type BlockRetrieverOptions interface {
	// Validate validates the options.
	Validate() error

	// SetRetrieveRequestPool sets the retrieve request pool.
	SetRetrieveRequestPool(value RetrieveRequestPool) BlockRetrieverOptions

	// RetrieveRequestPool returns the retrieve request pool.
	RetrieveRequestPool() RetrieveRequestPool

	// SetBytesPool sets the bytes pool.
	SetBytesPool(value pool.CheckedBytesPool) BlockRetrieverOptions

	// BytesPool returns the bytes pool.
	BytesPool() pool.CheckedBytesPool

	// SetFetchConcurrency sets the fetch concurrency.
	SetFetchConcurrency(value int) BlockRetrieverOptions

	// FetchConcurrency returns the fetch concurrency.
	FetchConcurrency() int

	// SetCacheBlocksOnRetrieve sets whether to cache blocks after retrieval at a global level.
	SetCacheBlocksOnRetrieve(value bool) BlockRetrieverOptions

	// CacheBlocksOnRetrieve returns whether to cache blocks after retrieval at a global level.
	CacheBlocksOnRetrieve() bool

	// SetIdentifierPool sets the identifier pool.
	SetIdentifierPool(value ident.Pool) BlockRetrieverOptions

	// IdentifierPool returns the identifier pool.
	IdentifierPool() ident.Pool

	// SetBlockLeaseManager sets the block lease manager.
	SetBlockLeaseManager(leaseMgr block.LeaseManager) BlockRetrieverOptions

	// BlockLeaseManager returns the block lease manager.
	BlockLeaseManager() block.LeaseManager

	// SetQueryLimits sets the query limits.
	SetQueryLimits(value limits.QueryLimits) BlockRetrieverOptions

	// QueryLimits returns the query limits.
	QueryLimits() limits.QueryLimits
}

// ForEachRemainingFn is the function that is run on each of the remaining
// series of the merge target that did not intersect with the fileset.
type ForEachRemainingFn func(seriesMetadata doc.Metadata, data block.FetchBlockResult) error

// MergeWith is an interface that the fs merger uses to merge data with.
type MergeWith interface {
	// Read returns the data for the given block start and series ID, whether
	// any data was found, and the error encountered (if any).
	Read(
		ctx context.Context,
		seriesID ident.ID,
		blockStart xtime.UnixNano,
		nsCtx namespace.Context,
	) ([]xio.BlockReader, bool, error)

	// ForEachRemaining loops through each seriesID/blockStart combination that
	// was not already handled by a call to Read().
	ForEachRemaining(
		ctx context.Context,
		blockStart xtime.UnixNano,
		fn ForEachRemainingFn,
		nsCtx namespace.Context,
	) error
}
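
// A minimal MergeWith sketch (illustrative only; the emptyMergeWith name is
// hypothetical): a merge target that contributes no extra data, which could be
// passed to Merger.Merge to simply rewrite a fileset as a new volume.
//
//	type emptyMergeWith struct{}
//
//	func (emptyMergeWith) Read(
//		ctx context.Context,
//		seriesID ident.ID,
//		blockStart xtime.UnixNano,
//		nsCtx namespace.Context,
//	) ([]xio.BlockReader, bool, error) {
//		// No merge target data for any series.
//		return nil, false, nil
//	}
//
//	func (emptyMergeWith) ForEachRemaining(
//		ctx context.Context,
//		blockStart xtime.UnixNano,
//		fn ForEachRemainingFn,
//		nsCtx namespace.Context,
//	) error {
//		// Nothing remaining to visit.
//		return nil
//	}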

// Merger is in charge of merging filesets with some target MergeWith interface.
type Merger interface {
	// Merge merges the specified fileset file with a merge target.
	Merge(
		fileID FileSetFileIdentifier,
		mergeWith MergeWith,
		nextVolumeIndex int,
		flushPreparer persist.FlushPreparer,
		nsCtx namespace.Context,
		onFlush persist.OnFlushSeries,
	) (persist.DataCloser, error)

	// MergeAndCleanup merges the specified fileset file with a merge target and
	// removes the previous version of the fileset. This should only be called
	// within the bootstrapper. Any other file deletions outside of the
	// bootstrapper should be handled by the CleanupManager.
	MergeAndCleanup(
		fileID FileSetFileIdentifier,
		mergeWith MergeWith,
		nextVolumeIndex int,
		flushPreparer persist.FlushPreparer,
		nsCtx namespace.Context,
		onFlush persist.OnFlushSeries,
		isBootstrapped bool,
	) error
}

// NewMergerFn is the function to call to get a new Merger.
type NewMergerFn func(
	reader DataFileSetReader,
	blockAllocSize int,
	srPool xio.SegmentReaderPool,
	multiIterPool encoding.MultiReaderIteratorPool,
	identPool ident.Pool,
	encoderPool encoding.EncoderPool,
	contextPool context.Pool,
	filePathPrefix string,
	nsOpts namespace.Options,
) Merger

// Segments represents the index segments on disk for an index volume.
type Segments interface {
	ShardTimeRanges() result.ShardTimeRanges
	VolumeType() idxpersist.IndexVolumeType
	VolumeIndex() int
	AbsoluteFilePaths() []string
	BlockStart() xtime.UnixNano
}

// IndexClaimsManager manages concurrent claims to volume indices per namespace and block start.
// This allows multiple threads to safely increment the volume index.
type IndexClaimsManager interface {
	ClaimNextIndexFileSetVolumeIndex(
		md namespace.Metadata,
		blockStart xtime.UnixNano,
	) (int, error)
}
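
// A minimal claim sketch (illustrative only; the nextVolume name is
// hypothetical): reserve the next volume index before writing a new index
// fileset volume for a block start.
//
//	func nextVolume(mgr IndexClaimsManager, md namespace.Metadata,
//		blockStart xtime.UnixNano) (int, error) {
//		volumeIndex, err := mgr.ClaimNextIndexFileSetVolumeIndex(md, blockStart)
//		if err != nil {
//			// ErrIndexOutOfRetention is returned for out-of-retention block starts.
//			return 0, err
//		}
//		return volumeIndex, nil
//	}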

// StreamedDataEntry contains the data of a single entry returned by a streaming method.
// The underlying data slices are reused and invalidated on every read.
type StreamedDataEntry struct {
	ID           ident.BytesID
	EncodedTags  ts.EncodedTags
	Data         []byte
	DataChecksum uint32
}

// StreamedMetadataEntry contains the metadata of a single entry returned by a streaming method.
// The underlying data slices are reused and invalidated on every read.
type StreamedMetadataEntry struct {
	ID           ident.BytesID
	EncodedTags  ts.EncodedTags
	Length       int
	DataChecksum uint32
}
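
// A minimal streaming-read sketch (illustrative only; the streamIDs name is
// hypothetical, and it assumes the reader was opened with StreamingEnabled set
// to true). It shows why returned slices must be copied before the next
// StreamingRead call invalidates them:
//
//	func streamIDs(r DataFileSetReader) ([][]byte, error) {
//		var ids [][]byte
//		for {
//			entry, err := r.StreamingRead()
//			if err == io.EOF {
//				return ids, nil
//			}
//			if err != nil {
//				return nil, err
//			}
//			// Copy before the next call reuses the underlying buffers.
//			idCopy := append([]byte(nil), entry.ID...)
//			ids = append(ids, idCopy)
//		}
//	}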

// NewReaderFn creates a new DataFileSetReader.
type NewReaderFn func(bytesPool pool.CheckedBytesPool, opts Options) (DataFileSetReader, error)