github.com/m3db/m3@v1.5.0/src/dbnode/persist/fs/read.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package fs

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"sort"
	"time"

	"github.com/m3db/m3/src/dbnode/digest"
	"github.com/m3db/m3/src/dbnode/persist"
	"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
	"github.com/m3db/m3/src/dbnode/persist/schema"
	"github.com/m3db/m3/src/x/checked"
	xerrors "github.com/m3db/m3/src/x/errors"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/mmap"
	"github.com/m3db/m3/src/x/pool"
	"github.com/m3db/m3/src/x/serialize"
	xtime "github.com/m3db/m3/src/x/time"

	"go.uber.org/zap"
)

var (
	// ErrCheckpointFileNotFound is returned when the checkpoint file doesn't exist
	ErrCheckpointFileNotFound = errors.New("checkpoint file does not exist")

	// errReadNotExpectedSize is returned when the size of the next read does not match the size specified by the index
	errReadNotExpectedSize = errors.New("next read not expected size")

	errUnexpectedSortByOffset = errors.New("should not sort index by offsets when doing reads sorted by id")

	errStreamingRequired    = errors.New("streaming must be enabled for streaming read methods")
	errStreamingUnsupported = errors.New("streaming must be disabled for non-streaming read methods")
)

const (
	mmapPersistFsDataName      = "mmap.persist.fs.data"
	mmapPersistFsDataIndexName = "mmap.persist.fs.dataindex"
)

type reader struct {
	opts          Options
	hugePagesOpts mmap.HugeTLBOptions

	filePathPrefix string
	namespace      ident.ID

	start     xtime.UnixNano
	blockSize time.Duration

	infoFdWithDigest           digest.FdWithDigestReader
	bloomFilterWithDigest      digest.FdWithDigestReader
	digestFdWithDigestContents digest.FdWithDigestContentsReader

	indexFd                 *os.File
	indexMmap               mmap.Descriptor
	indexDecoderStream      dataFileSetReaderDecoderStream
	indexEntriesByOffsetAsc []schema.IndexEntry

	dataFd     *os.File
	dataMmap   mmap.Descriptor
	dataReader digest.ReaderWithDigest

	bloomFilterFd *os.File

	entries         int
	bloomFilterInfo schema.IndexBloomFilterInfo
	entriesRead     int
	metadataRead    int
	decoder         *msgpack.Decoder
	digestBuf       digest.Buffer
	bytesPool       pool.CheckedBytesPool
	tagDecoderPool  serialize.TagDecoderPool

	streamingID   ident.BytesID
	streamingTags []byte
	streamingData []byte

	expectedInfoDigest        uint32
	expectedIndexDigest       uint32
	expectedDataDigest        uint32
	expectedDigestOfDigest    uint32
	expectedBloomFilterDigest uint32
	shard                     uint32
	volume                    int
	open                      bool
	streamingEnabled          bool
}

// NewReader returns a new reader and expects all files to exist. It reads the
// index info in full on the call to Open. The bytesPool may be passed as nil if
// callers prefer dynamically allocated IDs and data.
func NewReader(
	bytesPool pool.CheckedBytesPool,
	opts Options,
) (DataFileSetReader, error) {
	if err := opts.Validate(); err != nil {
		return nil, err
	}
	return &reader{
		// When initializing new fields that should be static, be sure to save
		// and reset them after Close() resets the fields to all default values.
		opts:           opts,
		filePathPrefix: opts.FilePathPrefix(),
		hugePagesOpts: mmap.HugeTLBOptions{
			Enabled:   opts.MmapEnableHugeTLB(),
			Threshold: opts.MmapHugeTLBThreshold(),
		},
		infoFdWithDigest:           digest.NewFdWithDigestReader(opts.InfoReaderBufferSize()),
		digestFdWithDigestContents: digest.NewFdWithDigestContentsReader(opts.InfoReaderBufferSize()),
		bloomFilterWithDigest:      digest.NewFdWithDigestReader(opts.InfoReaderBufferSize()),
		indexDecoderStream:         newReaderDecoderStream(),
		dataReader:                 digest.NewReaderWithDigest(nil),
		decoder:                    msgpack.NewDecoder(opts.DecodingOptions()),
		digestBuf:                  digest.NewBuffer(),
		bytesPool:                  bytesPool,
		tagDecoderPool:             opts.TagDecoderPool(),
	}, nil
}
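
// exampleReadFileSet is an illustrative sketch added for documentation purposes
// and is not part of the original file. It shows one way a caller might drive the
// reader: construct it with NewReader (a nil bytesPool means IDs and data are
// dynamically allocated), Open a fileset described by openOpts (assumed to be
// populated by the caller with the namespace/shard/block identifier and fileset
// type), iterate Read until io.EOF, and finally Validate the digests once the
// whole data file has been consumed.
func exampleReadFileSet(opts Options, openOpts DataReaderOpenOptions) error {
	r, err := NewReader(nil, opts)
	if err != nil {
		return err
	}
	if err := r.Open(openOpts); err != nil {
		return err
	}
	defer r.Close()

	for {
		id, tags, data, _, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// The returned bytes are reference counted; hold a reference while using them.
		data.IncRef()
		_ = data.Bytes()
		data.DecRef()
		id.Finalize()
		tags.Close()
	}

	// The data digest is computed over the entire data file, so Validate is only
	// meaningful after every entry has been read.
	return r.Validate()
}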

func (r *reader) Open(opts DataReaderOpenOptions) error {
	var (
		namespace   = opts.Identifier.Namespace
		shard       = opts.Identifier.Shard
		blockStart  = opts.Identifier.BlockStart
		volumeIndex = opts.Identifier.VolumeIndex
		err         error
	)

	var (
		shardDir            string
		checkpointFilepath  string
		infoFilepath        string
		digestFilepath      string
		bloomFilterFilepath string
		indexFilepath       string
		dataFilepath        string
	)

	r.streamingEnabled = opts.StreamingEnabled

	switch opts.FileSetType {
	case persist.FileSetSnapshotType:
		shardDir = ShardSnapshotsDirPath(r.filePathPrefix, namespace, shard)
		checkpointFilepath = FilesetPathFromTimeAndIndex(shardDir, blockStart, volumeIndex, CheckpointFileSuffix)
		infoFilepath = FilesetPathFromTimeAndIndex(shardDir, blockStart, volumeIndex, InfoFileSuffix)
		digestFilepath = FilesetPathFromTimeAndIndex(shardDir, blockStart, volumeIndex, DigestFileSuffix)
		bloomFilterFilepath = FilesetPathFromTimeAndIndex(shardDir, blockStart, volumeIndex, bloomFilterFileSuffix)
		indexFilepath = FilesetPathFromTimeAndIndex(shardDir, blockStart, volumeIndex, indexFileSuffix)
		dataFilepath = FilesetPathFromTimeAndIndex(shardDir, blockStart, volumeIndex, dataFileSuffix)
	case persist.FileSetFlushType:
		shardDir = ShardDataDirPath(r.filePathPrefix, namespace, shard)

		isLegacy := false
		if volumeIndex == 0 {
			isLegacy, err = isFirstVolumeLegacy(shardDir, blockStart, CheckpointFileSuffix)
			if err != nil {
				return err
			}
		}

		checkpointFilepath = dataFilesetPathFromTimeAndIndex(
			shardDir, blockStart, volumeIndex, CheckpointFileSuffix, isLegacy)
		infoFilepath = dataFilesetPathFromTimeAndIndex(
			shardDir, blockStart, volumeIndex, InfoFileSuffix, isLegacy)
		digestFilepath = dataFilesetPathFromTimeAndIndex(
			shardDir, blockStart, volumeIndex, DigestFileSuffix, isLegacy)
		bloomFilterFilepath = dataFilesetPathFromTimeAndIndex(
			shardDir, blockStart, volumeIndex, bloomFilterFileSuffix, isLegacy)
		indexFilepath = dataFilesetPathFromTimeAndIndex(
			shardDir, blockStart, volumeIndex, indexFileSuffix, isLegacy)
		dataFilepath = dataFilesetPathFromTimeAndIndex(
			shardDir, blockStart, volumeIndex, dataFileSuffix, isLegacy)

	default:
		return fmt.Errorf("unable to open reader with fileset type: %s", opts.FileSetType)
	}

	// If there is no checkpoint file, don't read the data files.
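	// NB: the checkpoint file stores the digest of the digests file; readDigest()
	// later validates the digests file against this value before trusting the
	// per-file digests (info, index, data and bloom filter) that it lists.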
	digest, err := readCheckpointFile(checkpointFilepath, r.digestBuf)
	if err != nil {
		return err
	}
	r.expectedDigestOfDigest = digest

	var infoFd, digestFd *os.File
	err = openFiles(os.Open, map[string]**os.File{
		infoFilepath:        &infoFd,
		digestFilepath:      &digestFd,
		bloomFilterFilepath: &r.bloomFilterFd,
	})
	if err != nil {
		return err
	}

	r.infoFdWithDigest.Reset(infoFd)
	r.digestFdWithDigestContents.Reset(digestFd)

	defer func() {
		// NB(r): We don't need to keep these FDs open as we use these up front
		r.infoFdWithDigest.Close()
		r.digestFdWithDigestContents.Close()
	}()

	result, err := mmap.Files(os.Open, map[string]mmap.FileDesc{
		indexFilepath: {
			File:       &r.indexFd,
			Descriptor: &r.indexMmap,
			Options: mmap.Options{
				Read:    true,
				HugeTLB: r.hugePagesOpts,
				ReporterOptions: mmap.ReporterOptions{
					Context: mmap.Context{
						Name: mmapPersistFsDataIndexName,
					},
					Reporter: r.opts.MmapReporter(),
				},
			},
		},
		dataFilepath: {
			File:       &r.dataFd,
			Descriptor: &r.dataMmap,
			Options: mmap.Options{
				Read:    true,
				HugeTLB: r.hugePagesOpts,
				ReporterOptions: mmap.ReporterOptions{
					Context: mmap.Context{
						Name: mmapPersistFsDataName,
					},
					Reporter: r.opts.MmapReporter(),
				},
			},
		},
	})
	if err != nil {
		return err
	}

	if warning := result.Warning; warning != nil {
		logger := r.opts.InstrumentOptions().Logger()
		logger.Warn("warning while mmapping files in reader", zap.Error(warning))
	}

	r.indexDecoderStream.Reset(r.indexMmap.Bytes)
	r.dataReader.Reset(bytes.NewReader(r.dataMmap.Bytes))

	if err := r.readDigest(); err != nil {
		// Try to close if failed to read
		r.Close()
		return err
	}
	infoStat, err := infoFd.Stat()
	if err != nil {
		r.Close()
		return err
	}
	if err := r.readInfo(int(infoStat.Size())); err != nil {
		r.Close()
		return err
	}
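	// In streaming mode index entries are decoded lazily from the mmap'd index
	// stream as StreamingRead / StreamingReadMetadata are called; otherwise the
	// whole index is decoded up front and sorted by data offset so Read can walk
	// the data file sequentially.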
	if opts.StreamingEnabled {
		r.decoder.Reset(r.indexDecoderStream)
	} else if err := r.readIndexAndSortByOffsetAsc(); err != nil {
		r.Close()
		return err
	}

	r.open = true
	r.namespace = namespace
	r.shard = shard

	return nil
}

func (r *reader) Status() DataFileSetReaderStatus {
	return DataFileSetReaderStatus{
		Open:       r.open,
		Namespace:  r.namespace,
		Shard:      r.shard,
		Volume:     r.volume,
		BlockStart: r.start,
		BlockSize:  r.blockSize,
	}
}

func (r *reader) readDigest() error {
	fsDigests, err := readFileSetDigests(r.digestFdWithDigestContents)
	if err != nil {
		return err
	}

	err = r.digestFdWithDigestContents.Validate(r.expectedDigestOfDigest)
	if err != nil {
		return err
	}

	// Note that we skip over the summaries file digest here; it is available
	// but we don't need it.
	r.expectedInfoDigest = fsDigests.infoDigest
	r.expectedIndexDigest = fsDigests.indexDigest
	r.expectedBloomFilterDigest = fsDigests.bloomFilterDigest
	r.expectedDataDigest = fsDigests.dataDigest

	return nil
}

func (r *reader) readInfo(size int) error {
	buf := make([]byte, size)
	n, err := r.infoFdWithDigest.ReadAllAndValidate(buf, r.expectedInfoDigest)
	if err != nil {
		return err
	}
	r.decoder.Reset(msgpack.NewByteDecoderStream(buf[:n]))
	info, err := r.decoder.DecodeIndexInfo()
	if err != nil {
		return err
	}
	r.start = xtime.UnixNano(info.BlockStart)
	r.volume = info.VolumeIndex
	r.blockSize = time.Duration(info.BlockSize)
	r.entries = int(info.Entries)
	r.entriesRead = 0
	r.metadataRead = 0
	r.bloomFilterInfo = info.BloomFilter
	return nil
}

func (r *reader) readIndexAndSortByOffsetAsc() error {
	if r.streamingEnabled {
		return errUnexpectedSortByOffset
	}

	r.decoder.Reset(r.indexDecoderStream)
	for i := 0; i < r.entries; i++ {
		entry, err := r.decoder.DecodeIndexEntry(nil)
		if err != nil {
			return err
		}
		r.indexEntriesByOffsetAsc = append(r.indexEntriesByOffsetAsc, entry)
	}
	// NB(r): As we decode each block we need access to each index entry
	// in the order we decode the data. This is only required for regular reads.
	sort.Sort(indexEntriesByOffsetAsc(r.indexEntriesByOffsetAsc))

	return nil
}

func (r *reader) StreamingRead() (StreamedDataEntry, error) {
	if !r.streamingEnabled {
		return StreamedDataEntry{}, errStreamingRequired
	}

	if r.entriesRead >= r.entries {
		return StreamedDataEntry{}, io.EOF
	}

	entry, err := r.decoder.DecodeIndexEntry(nil)
	if err != nil {
		return StreamedDataEntry{}, err
	}

	if entry.Offset+entry.Size > int64(len(r.dataMmap.Bytes)) {
		return StreamedDataEntry{}, fmt.Errorf(
			"attempt to read beyond data file size (offset=%d, size=%d, file size=%d)",
			entry.Offset, entry.Size, len(r.dataMmap.Bytes))
	}
	data := r.dataMmap.Bytes[entry.Offset : entry.Offset+entry.Size]

	// NB(r): _must_ check the checksum against known checksum as the data
	// file might not have been verified if we haven't read through the file yet.
	if entry.DataChecksum != int64(digest.Checksum(data)) {
		return StreamedDataEntry{}, errSeekChecksumMismatch
	}

	r.streamingData = append(r.streamingData[:0], data...)
	r.streamingID = append(r.streamingID[:0], entry.ID...)
	r.streamingTags = append(r.streamingTags[:0], entry.EncodedTags...)

	r.entriesRead++

	return StreamedDataEntry{
		ID:           r.streamingID,
		EncodedTags:  r.streamingTags,
		Data:         r.streamingData,
		DataChecksum: uint32(entry.DataChecksum),
	}, nil
}
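
// exampleStreamEntries is an illustrative sketch added for documentation purposes
// and is not part of the original file. It shows consuming a fileset that was
// opened with StreamingEnabled set. The ID, EncodedTags and Data slices returned
// by StreamingRead alias buffers that are reused on the next call, so anything
// retained across iterations must be copied first.
func exampleStreamEntries(r DataFileSetReader) error {
	for {
		entry, err := r.StreamingRead()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// Copy before retaining; the entry's slices are only valid until the next call.
		retainedID := append([]byte(nil), entry.ID...)
		_ = retainedID
	}
}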

func (r *reader) Read() (ident.ID, ident.TagIterator, checked.Bytes, uint32, error) {
	if r.streamingEnabled {
		return nil, nil, nil, 0, errStreamingUnsupported
	}

	if r.entries > 0 && len(r.indexEntriesByOffsetAsc) < r.entries {
		// Have not read the index yet; this is required when reading
		// data as we need each index entry in order by the offset ascending
		if err := r.readIndexAndSortByOffsetAsc(); err != nil {
			return nil, nil, nil, 0, err
		}
	}

	if r.entriesRead >= r.entries {
		return nil, nil, nil, 0, io.EOF
	}

	entry := r.indexEntriesByOffsetAsc[r.entriesRead]

	var data checked.Bytes
	if r.bytesPool != nil {
		data = r.bytesPool.Get(int(entry.Size))
		data.IncRef()
		defer data.DecRef()
		data.Resize(int(entry.Size))
	} else {
		data = checked.NewBytes(make([]byte, entry.Size), nil)
		data.IncRef()
		defer data.DecRef()
	}

	n, err := r.dataReader.Read(data.Bytes())
	if err != nil {
		return nil, nil, nil, 0, err
	}
	if n != int(entry.Size) {
		return nil, nil, nil, 0, errReadNotExpectedSize
	}

	id := r.entryClonedID(entry.ID)
	tags := r.entryClonedEncodedTagsIter(entry.EncodedTags)

	r.entriesRead++
	return id, tags, data, uint32(entry.DataChecksum), nil
}

func (r *reader) StreamingReadMetadata() (StreamedMetadataEntry, error) {
	if !r.streamingEnabled {
		return StreamedMetadataEntry{}, errStreamingRequired
	}

	if r.metadataRead >= r.entries {
		return StreamedMetadataEntry{}, io.EOF
	}

	entry, err := r.decoder.DecodeIndexEntry(nil)
	if err != nil {
		return StreamedMetadataEntry{}, err
	}

	r.streamingID = append(r.streamingID[:0], entry.ID...)
	r.streamingTags = append(r.streamingTags[:0], entry.EncodedTags...)

	r.metadataRead++

	return StreamedMetadataEntry{
		ID:           r.streamingID,
		EncodedTags:  r.streamingTags,
		Length:       int(entry.Size),
		DataChecksum: uint32(entry.DataChecksum),
	}, nil
}

func (r *reader) ReadMetadata() (ident.ID, ident.TagIterator, int, uint32, error) {
	if r.streamingEnabled {
		return nil, nil, 0, 0, errStreamingUnsupported
	}

	if r.metadataRead >= r.entries {
		return nil, nil, 0, 0, io.EOF
	}

	entry := r.indexEntriesByOffsetAsc[r.metadataRead]
	id := r.entryClonedID(entry.ID)
	tags := r.entryClonedEncodedTagsIter(entry.EncodedTags)
	length := int(entry.Size)
	checksum := uint32(entry.DataChecksum)

	r.metadataRead++
	return id, tags, length, checksum, nil
}

func (r *reader) ReadBloomFilter() (*ManagedConcurrentBloomFilter, error) {
	return newManagedConcurrentBloomFilterFromFile(
		r.bloomFilterFd,
		r.bloomFilterWithDigest,
		r.expectedBloomFilterDigest,
		uint(r.bloomFilterInfo.NumElementsM),
		uint(r.bloomFilterInfo.NumHashesK),
		r.opts.ForceBloomFilterMmapMemory(),
		mmap.ReporterOptions{
			Reporter: r.opts.MmapReporter(),
		},
	)
}

func (r *reader) entryClonedBytes(bytes []byte) checked.Bytes {
	var bytesClone checked.Bytes
	if r.bytesPool != nil {
		bytesClone = r.bytesPool.Get(len(bytes))
	} else {
		bytesClone = checked.NewBytes(make([]byte, 0, len(bytes)), nil)
	}
	bytesClone.IncRef()
	bytesClone.AppendAll(bytes)
	bytesClone.DecRef()
	return bytesClone
}

func (r *reader) entryClonedID(id []byte) ident.ID {
	return ident.BinaryID(r.entryClonedBytes(id))
}

func (r *reader) entryClonedEncodedTagsIter(encodedTags []byte) ident.TagIterator {
	if len(encodedTags) == 0 {
		// No tags set for this entry, return an empty tag iterator
		return ident.EmptyTagIterator
	}
	decoder := r.tagDecoderPool.Get()
	decoder.Reset(r.entryClonedBytes(encodedTags))
	return decoder
}

// NB(xichen): Validate should be called after all data is read because
// the digest is calculated for the entire data file.
func (r *reader) Validate() error {
	var multiErr xerrors.MultiError
	multiErr = multiErr.Add(r.ValidateMetadata())
	multiErr = multiErr.Add(r.ValidateData())
	return multiErr.FinalError()
}

// NB(r): ValidateMetadata can be called immediately after Open(...) since
// the metadata is read upfront.
func (r *reader) ValidateMetadata() error {
	err := r.indexDecoderStream.reader().Validate(r.expectedIndexDigest)
	if err != nil {
		return fmt.Errorf("could not validate index file: %v", err)
	}
	return nil
}

// NB(xichen): ValidateData should be called after all data is read because
// the digest is calculated for the entire data file.
func (r *reader) ValidateData() error {
	err := r.dataReader.Validate(r.expectedDataDigest)
	if err != nil {
		return fmt.Errorf("could not validate data file: %v", err)
	}
	return nil
}

func (r *reader) Range() xtime.Range {
	return xtime.Range{Start: r.start, End: r.start.Add(r.blockSize)}
}

func (r *reader) Entries() int {
	return r.entries
}

func (r *reader) EntriesRead() int {
	return r.entriesRead
}

func (r *reader) MetadataRead() int {
	return r.metadataRead
}

func (r *reader) StreamingEnabled() bool {
	return r.streamingEnabled
}

func (r *reader) Close() error {
	// Close and prepare resources that are to be reused
	multiErr := xerrors.NewMultiError()
	multiErr = multiErr.Add(mmap.Munmap(r.indexMmap))
	multiErr = multiErr.Add(mmap.Munmap(r.dataMmap))
	multiErr = multiErr.Add(r.indexFd.Close())
	multiErr = multiErr.Add(r.dataFd.Close())
	multiErr = multiErr.Add(r.bloomFilterFd.Close())
	r.indexDecoderStream.Reset(nil)
	r.dataReader.Reset(nil)
	for i := 0; i < len(r.indexEntriesByOffsetAsc); i++ {
		r.indexEntriesByOffsetAsc[i].ID = nil
	}
	r.indexEntriesByOffsetAsc = r.indexEntriesByOffsetAsc[:0]

	// Save fields we want to reassign after resetting struct
	opts := r.opts
	filePathPrefix := r.filePathPrefix
	hugePagesOpts := r.hugePagesOpts
	infoFdWithDigest := r.infoFdWithDigest
	digestFdWithDigestContents := r.digestFdWithDigestContents
	bloomFilterWithDigest := r.bloomFilterWithDigest
	indexDecoderStream := r.indexDecoderStream
	dataReader := r.dataReader
	decoder := r.decoder
	digestBuf := r.digestBuf
	bytesPool := r.bytesPool
	tagDecoderPool := r.tagDecoderPool
	indexEntriesByOffsetAsc := r.indexEntriesByOffsetAsc

	// Reset struct
	*r = reader{}

	// Reset the saved fields
	r.opts = opts
	r.filePathPrefix = filePathPrefix
	r.hugePagesOpts = hugePagesOpts
	r.infoFdWithDigest = infoFdWithDigest
	r.digestFdWithDigestContents = digestFdWithDigestContents
	r.bloomFilterWithDigest = bloomFilterWithDigest
	r.indexDecoderStream = indexDecoderStream
	r.dataReader = dataReader
	r.decoder = decoder
	r.digestBuf = digestBuf
	r.bytesPool = bytesPool
	r.tagDecoderPool = tagDecoderPool
	r.indexEntriesByOffsetAsc = indexEntriesByOffsetAsc

	return multiErr.FinalError()
}

// indexEntriesByOffsetAsc implements sort.Interface, ordering index entries by
// their data file offset in ascending order
type indexEntriesByOffsetAsc []schema.IndexEntry

func (e indexEntriesByOffsetAsc) Len() int {
	return len(e)
}

func (e indexEntriesByOffsetAsc) Less(i, j int) bool {
	return e[i].Offset < e[j].Offset
}

func (e indexEntriesByOffsetAsc) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}
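
// exampleSumSeriesLengths is an illustrative sketch added for documentation
// purposes and is not part of the original file. It walks only the index entries
// of a fileset opened with StreamingEnabled set, without touching the data file,
// and tallies the encoded size of every series via StreamingReadMetadata.
func exampleSumSeriesLengths(r DataFileSetReader) (int, error) {
	total := 0
	for {
		entry, err := r.StreamingReadMetadata()
		if err == io.EOF {
			return total, nil
		}
		if err != nil {
			return 0, err
		}
		total += entry.Length
	}
}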