github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/storage/index_test.go (about)

     1  // Copyright (c) 2018 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package storage
    22  
    23  import (
    24  	stdctx "context"
    25  	"errors"
    26  	"fmt"
    27  	"io/ioutil"
    28  	"os"
    29  	"sort"
    30  	"testing"
    31  	"time"
    32  
    33  	indexpb "github.com/m3db/m3/src/dbnode/generated/proto/index"
    34  	"github.com/m3db/m3/src/dbnode/namespace"
    35  	"github.com/m3db/m3/src/dbnode/persist"
    36  	"github.com/m3db/m3/src/dbnode/persist/fs"
    37  	"github.com/m3db/m3/src/dbnode/retention"
    38  	"github.com/m3db/m3/src/dbnode/storage/block"
    39  	"github.com/m3db/m3/src/dbnode/storage/index"
    40  	"github.com/m3db/m3/src/m3ninx/doc"
    41  	"github.com/m3db/m3/src/m3ninx/idx"
    42  	"github.com/m3db/m3/src/m3ninx/index/segment"
    43  	idxpersist "github.com/m3db/m3/src/m3ninx/persist"
    44  	"github.com/m3db/m3/src/x/context"
    45  	xerrors "github.com/m3db/m3/src/x/errors"
    46  	"github.com/m3db/m3/src/x/ident"
    47  	xtest "github.com/m3db/m3/src/x/test"
    48  	xtime "github.com/m3db/m3/src/x/time"
    49  
    50  	protobuftypes "github.com/gogo/protobuf/types"
    51  	"github.com/golang/mock/gomock"
    52  	opentracinglog "github.com/opentracing/opentracing-go/log"
    53  	"github.com/stretchr/testify/assert"
    54  	"github.com/stretchr/testify/require"
    55  )
    56  
    57  func TestNamespaceIndexCleanupExpiredFilesets(t *testing.T) {
    58  	md := testNamespaceMetadata(time.Hour, time.Hour*8)
    59  	nsIdx, err := newNamespaceIndex(md,
    60  		namespace.NewRuntimeOptionsManager(md.ID().String()),
    61  		testShardSet, DefaultTestOptions())
    62  	require.NoError(t, err)
    63  
    64  	now := xtime.Now().Truncate(time.Hour)
    65  	idx := nsIdx.(*nsIndex)
    66  
    67  	oldestTime := now.Add(-time.Hour * 8)
    68  	files := []string{"abc"}
    69  
    70  	idx.indexFilesetsBeforeFn = func(
    71  		dir string, nsID ident.ID, exclusiveTime xtime.UnixNano) ([]string, error) {
    72  		require.True(t, oldestTime.Equal(exclusiveTime), fmt.Sprintf("%v %v", exclusiveTime, oldestTime))
    73  		return files, nil
    74  	}
    75  	idx.deleteFilesFn = func(s []string) error {
    76  		require.Equal(t, files, s)
    77  		return nil
    78  	}
    79  	require.NoError(t, idx.CleanupExpiredFileSets(now))
    80  }
    81  
    82  func TestNamespaceIndexCleanupDuplicateFilesets(t *testing.T) {
    83  	md := testNamespaceMetadata(time.Hour, time.Hour*8)
    84  	nsIdx, err := newNamespaceIndex(md,
    85  		namespace.NewRuntimeOptionsManager(md.ID().String()),
    86  		testShardSet, DefaultTestOptions())
    87  	require.NoError(t, err)
    88  
    89  	idx := nsIdx.(*nsIndex)
    90  	now := xtime.Now().Truncate(time.Hour)
    91  	indexBlockSize := 2 * time.Hour
    92  	blockTime := now.Add(-2 * indexBlockSize)
    93  
    94  	dir, err := ioutil.TempDir("", t.Name())
    95  	require.NoError(t, err)
    96  
    97  	defer os.RemoveAll(dir)
    98  
    99  	fset1, err := ioutil.TempFile(dir, "fileset-9000-0-")
   100  	require.NoError(t, err)
   101  	fset2, err := ioutil.TempFile(dir, "fileset-9000-1-")
   102  	require.NoError(t, err)
   103  	fset3, err := ioutil.TempFile(dir, "fileset-9000-2-")
   104  	require.NoError(t, err)
   105  
   106  	volumeType := "extra"
   107  	infoFiles := []fs.ReadIndexInfoFileResult{
   108  		{
   109  			Info: indexpb.IndexVolumeInfo{
   110  				BlockStart: int64(blockTime),
   111  				BlockSize:  int64(indexBlockSize),
   112  				Shards:     []uint32{0, 1, 2},
   113  				IndexVolumeType: &protobuftypes.StringValue{
   114  					Value: volumeType,
   115  				},
   116  			},
   117  			AbsoluteFilePaths: []string{fset1.Name()},
   118  		},
   119  		{
   120  			Info: indexpb.IndexVolumeInfo{
   121  				BlockStart: int64(blockTime),
   122  				BlockSize:  int64(indexBlockSize),
   123  				Shards:     []uint32{0, 1, 2},
   124  				IndexVolumeType: &protobuftypes.StringValue{
   125  					Value: volumeType,
   126  				},
   127  			},
   128  			AbsoluteFilePaths: []string{fset2.Name()},
   129  		},
   130  		{
   131  			Info: indexpb.IndexVolumeInfo{
   132  				BlockStart: int64(blockTime),
   133  				BlockSize:  int64(indexBlockSize),
   134  				Shards:     []uint32{0, 1, 2, 3},
   135  				IndexVolumeType: &protobuftypes.StringValue{
   136  					Value: volumeType,
   137  				},
   138  			},
   139  			AbsoluteFilePaths: []string{fset3.Name()},
   140  		},
   141  	}
   142  
   143  	idx.readIndexInfoFilesFn = func(_ fs.ReadIndexInfoFilesOptions) []fs.ReadIndexInfoFileResult {
   144  		return infoFiles
   145  	}
   146  
   147  	idx.deleteFilesFn = func(s []string) error {
   148  		require.Equal(t, []string{fset1.Name(), fset2.Name()}, s)
   149  		multiErr := xerrors.NewMultiError()
   150  		for _, file := range s {
   151  			multiErr = multiErr.Add(os.Remove(file))
   152  		}
   153  		return multiErr.FinalError()
   154  	}
   155  	require.NoError(t, idx.CleanupDuplicateFileSets([]uint32{0, 1, 2, 3}))
   156  }
   157  
   158  func TestNamespaceIndexCleanupDuplicateFilesets_SortingByBlockStartAndVolumeType(t *testing.T) {
   159  	blockStart1 := xtime.Now().Truncate(2 * time.Hour)
   160  	blockStart2 := blockStart1.Add(-2 * time.Hour)
   161  
   162  	filesets := []struct {
   163  		blockStart   xtime.UnixNano
   164  		volumeType   string
   165  		volumeIndex  int
   166  		shouldRemove bool
   167  	}{
   168  		{
   169  			blockStart:   blockStart1,
   170  			volumeType:   "default",
   171  			volumeIndex:  0,
   172  			shouldRemove: false,
   173  		},
   174  		{
   175  			blockStart:   blockStart1,
   176  			volumeType:   "extra",
   177  			volumeIndex:  1,
   178  			shouldRemove: false,
   179  		},
   180  		{
   181  			blockStart:   blockStart1,
   182  			volumeType:   "extra",
   183  			volumeIndex:  0,
   184  			shouldRemove: true,
   185  		},
   186  		{
   187  			blockStart:   blockStart2,
   188  			volumeType:   "default",
   189  			volumeIndex:  1,
   190  			shouldRemove: true,
   191  		},
   192  		{
   193  			blockStart:   blockStart2,
   194  			volumeType:   "default",
   195  			volumeIndex:  2,
   196  			shouldRemove: false,
   197  		},
   198  		{
   199  			blockStart:   blockStart2,
   200  			volumeType:   "default",
   201  			volumeIndex:  0,
   202  			shouldRemove: true,
   203  		},
   204  	}
   205  
   206  	shards := []uint32{1, 2}
   207  	expectedFilesToRemove := make([]string, 0)
   208  	infoFiles := make([]fs.ReadIndexInfoFileResult, 0)
   209  	for _, fileset := range filesets {
   210  		infoFile := newReadIndexInfoFileResult(fileset.blockStart, fileset.volumeType, fileset.volumeIndex, shards)
   211  		infoFiles = append(infoFiles, infoFile)
   212  		if fileset.shouldRemove {
   213  			expectedFilesToRemove = append(expectedFilesToRemove, infoFile.AbsoluteFilePaths...)
   214  		}
   215  	}
   216  
   217  	md := testNamespaceMetadata(time.Hour, time.Hour*8)
   218  	nsIdx, err := newNamespaceIndex(md,
   219  		namespace.NewRuntimeOptionsManager(md.ID().String()),
   220  		testShardSet, DefaultTestOptions())
   221  	require.NoError(t, err)
   222  	idx := nsIdx.(*nsIndex)
   223  	idx.readIndexInfoFilesFn = func(_ fs.ReadIndexInfoFilesOptions) []fs.ReadIndexInfoFileResult {
   224  		return infoFiles
   225  	}
   226  	idx.deleteFilesFn = func(s []string) error {
   227  		require.Len(t, s, len(expectedFilesToRemove))
   228  		for _, e := range expectedFilesToRemove {
   229  			assert.Contains(t, s, e)
   230  		}
   231  		return nil
   232  	}
   233  	require.NoError(t, idx.CleanupDuplicateFileSets(shards))
   234  }
   235  
   236  func TestNamespaceIndexCleanupDuplicateFilesets_ChangingShardList(t *testing.T) {
   237  	shardLists := []struct {
   238  		shards       []uint32
   239  		shouldRemove bool
   240  	}{
   241  		{
   242  			shards:       []uint32{1, 2},
   243  			shouldRemove: true,
   244  		},
   245  		{
   246  			shards:       []uint32{1, 2, 3},
   247  			shouldRemove: false,
   248  		},
   249  		{
   250  			shards:       []uint32{1, 2, 4},
   251  			shouldRemove: true,
   252  		},
   253  		{
   254  			shards:       []uint32{1, 2, 4},
   255  			shouldRemove: true,
   256  		},
   257  		{
   258  			shards:       []uint32{1, 5},
   259  			shouldRemove: true,
   260  		},
   261  		{
   262  			shards:       []uint32{1, 2, 4, 5},
   263  			shouldRemove: false,
   264  		},
   265  		{
   266  			shards:       []uint32{1, 2},
   267  			shouldRemove: false,
   268  		},
   269  	}
   270  
   271  	blockStart := xtime.Now().Truncate(2 * time.Hour)
   272  	expectedFilesToRemove := make([]string, 0)
   273  	infoFiles := make([]fs.ReadIndexInfoFileResult, 0)
   274  	for i, shardList := range shardLists {
   275  		infoFile := newReadIndexInfoFileResult(blockStart, "default", i, shardList.shards)
   276  		infoFiles = append(infoFiles, infoFile)
   277  		if shardList.shouldRemove {
   278  			expectedFilesToRemove = append(expectedFilesToRemove, infoFile.AbsoluteFilePaths...)
   279  		}
   280  	}
   281  
   282  	md := testNamespaceMetadata(time.Hour, time.Hour*8)
   283  	nsIdx, err := newNamespaceIndex(md,
   284  		namespace.NewRuntimeOptionsManager(md.ID().String()),
   285  		testShardSet, DefaultTestOptions())
   286  	require.NoError(t, err)
   287  	idx := nsIdx.(*nsIndex)
   288  	idx.readIndexInfoFilesFn = func(_ fs.ReadIndexInfoFilesOptions) []fs.ReadIndexInfoFileResult {
   289  		return infoFiles
   290  	}
   291  	idx.deleteFilesFn = func(s []string) error {
   292  		require.Len(t, s, len(expectedFilesToRemove))
   293  		for _, e := range expectedFilesToRemove {
   294  			assert.Contains(t, s, e)
   295  		}
   296  		return nil
   297  	}
   298  
   299  	require.NoError(t, idx.CleanupDuplicateFileSets([]uint32{1, 2, 3, 4, 5}))
   300  }
   301  
   302  func TestNamespaceIndexCleanupDuplicateFilesets_IgnoreNonActiveShards(t *testing.T) {
   303  	activeShards := []uint32{1, 2}
   304  	shardLists := []struct {
   305  		shards       []uint32
   306  		shouldRemove bool
   307  	}{
   308  		{
   309  			shards:       []uint32{1, 2, 3, 4},
   310  			shouldRemove: true,
   311  		},
   312  		{
   313  			shards:       []uint32{1, 2, 3},
   314  			shouldRemove: true,
   315  		},
   316  		{
   317  			shards:       []uint32{1, 2},
   318  			shouldRemove: false,
   319  		},
   320  	}
   321  
   322  	blockStart := xtime.Now().Truncate(2 * time.Hour)
   323  	expectedFilesToRemove := make([]string, 0)
   324  	infoFiles := make([]fs.ReadIndexInfoFileResult, 0)
   325  	for i, shardList := range shardLists {
   326  		infoFile := newReadIndexInfoFileResult(blockStart, "default", i, shardList.shards)
   327  		infoFiles = append(infoFiles, infoFile)
   328  		if shardList.shouldRemove {
   329  			expectedFilesToRemove = append(expectedFilesToRemove, infoFile.AbsoluteFilePaths...)
   330  		}
   331  	}
   332  
   333  	md := testNamespaceMetadata(time.Hour, time.Hour*8)
   334  	nsIdx, err := newNamespaceIndex(md,
   335  		namespace.NewRuntimeOptionsManager(md.ID().String()),
   336  		testShardSet, DefaultTestOptions())
   337  	require.NoError(t, err)
   338  	idx := nsIdx.(*nsIndex)
   339  	idx.readIndexInfoFilesFn = func(_ fs.ReadIndexInfoFilesOptions) []fs.ReadIndexInfoFileResult {
   340  		return infoFiles
   341  	}
   342  	idx.deleteFilesFn = func(s []string) error {
   343  		require.Len(t, s, len(expectedFilesToRemove))
   344  		for _, e := range expectedFilesToRemove {
   345  			assert.Contains(t, s, e)
   346  		}
   347  		return nil
   348  	}
   349  
   350  	require.NoError(t, idx.CleanupDuplicateFileSets(activeShards))
   351  }
   352  
   353  func TestNamespaceIndexCleanupDuplicateFilesets_NoActiveShards(t *testing.T) {
   354  	activeShards := []uint32{}
   355  	shardLists := []struct {
   356  		shards       []uint32
   357  		shouldRemove bool
   358  	}{
   359  		{
   360  			shards:       []uint32{1, 2, 3, 4},
   361  			shouldRemove: true,
   362  		},
   363  		{
   364  			shards:       []uint32{1, 2, 3},
   365  			shouldRemove: true,
   366  		},
   367  		{
   368  			shards:       []uint32{1, 2},
   369  			shouldRemove: false,
   370  		},
   371  	}
   372  
   373  	blockStart := xtime.Now().Truncate(2 * time.Hour)
   374  	expectedFilesToRemove := make([]string, 0)
   375  	infoFiles := make([]fs.ReadIndexInfoFileResult, 0)
   376  	for i, shardList := range shardLists {
   377  		infoFile := newReadIndexInfoFileResult(blockStart, "default", i, shardList.shards)
   378  		infoFiles = append(infoFiles, infoFile)
   379  		if shardList.shouldRemove {
   380  			expectedFilesToRemove = append(expectedFilesToRemove, infoFile.AbsoluteFilePaths...)
   381  		}
   382  	}
   383  
   384  	md := testNamespaceMetadata(time.Hour, time.Hour*8)
   385  	nsIdx, err := newNamespaceIndex(md,
   386  		namespace.NewRuntimeOptionsManager(md.ID().String()),
   387  		testShardSet, DefaultTestOptions())
   388  	require.NoError(t, err)
   389  	idx := nsIdx.(*nsIndex)
   390  	idx.readIndexInfoFilesFn = func(_ fs.ReadIndexInfoFilesOptions) []fs.ReadIndexInfoFileResult {
   391  		return infoFiles
   392  	}
   393  	idx.deleteFilesFn = func(s []string) error {
   394  		require.Len(t, s, len(expectedFilesToRemove))
   395  		for _, e := range expectedFilesToRemove {
   396  			assert.Contains(t, s, e)
   397  		}
   398  		return nil
   399  	}
   400  
   401  	require.NoError(t, idx.CleanupDuplicateFileSets(activeShards))
   402  }
   403  
   404  func TestNamespaceIndexCleanupDuplicateFilesetsNoop(t *testing.T) {
   405  	md := testNamespaceMetadata(time.Hour, time.Hour*8)
   406  	nsIdx, err := newNamespaceIndex(md,
   407  		namespace.NewRuntimeOptionsManager(md.ID().String()),
   408  		testShardSet, DefaultTestOptions())
   409  	require.NoError(t, err)
   410  
   411  	idx := nsIdx.(*nsIndex)
   412  	now := xtime.Now().Truncate(time.Hour)
   413  	indexBlockSize := 2 * time.Hour
   414  	blockTime := now.Add(-2 * indexBlockSize)
   415  
   416  	dir, err := ioutil.TempDir("", t.Name())
   417  	require.NoError(t, err)
   418  
   419  	defer os.RemoveAll(dir)
   420  
   421  	fset1, err := ioutil.TempFile(dir, "fileset-9000-0-")
   422  	require.NoError(t, err)
   423  	fset2, err := ioutil.TempFile(dir, "fileset-9000-1-")
   424  	require.NoError(t, err)
   425  
   426  	volumeType := string(idxpersist.DefaultIndexVolumeType)
   427  	infoFiles := []fs.ReadIndexInfoFileResult{
   428  		{
   429  			Info: indexpb.IndexVolumeInfo{
   430  				BlockStart: int64(blockTime),
   431  				BlockSize:  int64(indexBlockSize),
   432  				Shards:     []uint32{0, 1, 2},
   433  				IndexVolumeType: &protobuftypes.StringValue{
   434  					Value: volumeType,
   435  				},
   436  			},
   437  			AbsoluteFilePaths: []string{fset1.Name()},
   438  		},
   439  		{
   440  			Info: indexpb.IndexVolumeInfo{
   441  				BlockStart: int64(blockTime),
   442  				BlockSize:  int64(indexBlockSize),
   443  				Shards:     []uint32{4},
   444  				IndexVolumeType: &protobuftypes.StringValue{
   445  					Value: volumeType,
   446  				},
   447  			},
   448  			AbsoluteFilePaths: []string{fset2.Name()},
   449  		},
   450  	}
   451  
   452  	idx.readIndexInfoFilesFn = func(_ fs.ReadIndexInfoFilesOptions) []fs.ReadIndexInfoFileResult {
   453  		return infoFiles
   454  	}
   455  
   456  	idx.deleteFilesFn = func(s []string) error {
   457  		require.Equal(t, []string{}, s)
   458  		return nil
   459  	}
   460  	require.NoError(t, idx.CleanupDuplicateFileSets([]uint32{0, 1, 2, 4}))
   461  }
   462  
   463  func TestNamespaceIndexCleanupExpiredFilesetsWithBlocks(t *testing.T) {
   464  	ctrl := xtest.NewController(t)
   465  	defer ctrl.Finish()
   466  
   467  	md := testNamespaceMetadata(time.Hour, time.Hour*8)
   468  	nsIdx, err := newNamespaceIndex(md,
   469  		namespace.NewRuntimeOptionsManager(md.ID().String()),
   470  		testShardSet, DefaultTestOptions())
   471  	require.NoError(t, err)
   472  
   473  	defer func() {
   474  		require.NoError(t, nsIdx.Close())
   475  	}()
   476  
   477  	now := xtime.Now().Truncate(time.Hour)
   478  	idx := nsIdx.(*nsIndex)
   479  
   480  	mockBlock := index.NewMockBlock(ctrl)
   481  	mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
   482  	mockBlock.EXPECT().Close().Return(nil)
   483  	oldestTime := now.Add(-time.Hour * 9)
   484  	idx.state.blocksByTime[oldestTime] = mockBlock
   485  
   486  	idx.indexFilesetsBeforeFn = func(
   487  		dir string, nsID ident.ID, exclusiveTime xtime.UnixNano) ([]string, error) {
   488  		require.True(t, exclusiveTime.Equal(oldestTime))
   489  		return nil, nil
   490  	}
   491  	require.NoError(t, idx.CleanupExpiredFileSets(now))
   492  }
   493  
// TestNamespaceIndexCleanupCorruptedFilesets drives corrupted-fileset cleanup
// over a mix of healthy filesets, filesets marked corrupted, and filesets
// whose info file itself is unreadable, across several block starts and
// volume types, and asserts exactly the expected set of files is deleted.
// The per-entry shouldRemove flags encode the cleanup policy under test
// (e.g. keeping the most recent corrupted volume per block/volume type).
func TestNamespaceIndexCleanupCorruptedFilesets(t *testing.T) {
	md := testNamespaceMetadata(time.Hour, time.Hour*24)
	nsIdx, err := newNamespaceIndex(md,
		namespace.NewRuntimeOptionsManager(md.ID().String()),
		testShardSet, DefaultTestOptions())
	require.NoError(t, err)

	idx := nsIdx.(*nsIndex)
	now := xtime.Now().Truncate(time.Hour)
	indexBlockSize := 2 * time.Hour
	var (
		// Six consecutive block starts, oldest first.
		blockStarts = []xtime.UnixNano{
			now.Add(-6 * indexBlockSize),
			now.Add(-5 * indexBlockSize),
			now.Add(-4 * indexBlockSize),
			now.Add(-3 * indexBlockSize),
			now.Add(-2 * indexBlockSize),
			now.Add(-1 * indexBlockSize),
		}
		shards = []uint32{0, 1, 2} // has no effect on this test

		volumeTypeDefault = "default"
		volumeTypeExtra   = "extra"
	)

	// Table of (fileset, expected-to-be-removed) pairs; the int argument is
	// the volume index within the block.
	filesetsForTest := []struct {
		infoFile     fs.ReadIndexInfoFileResult
		shouldRemove bool
	}{
		{newReadIndexInfoFileResult(blockStarts[0], volumeTypeDefault, 0, shards), false},
		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[0], volumeTypeDefault, 1), true},
		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[0], volumeTypeDefault, 2), true},
		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[0], volumeTypeExtra, 5), true},
		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[0], volumeTypeExtra, 6), false},
		{newReadIndexInfoFileResult(blockStarts[0], volumeTypeDefault, 11, shards), false},

		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[1], volumeTypeDefault, 1), false},
		{newReadIndexInfoFileResultForCorruptedInfoFile(blockStarts[1], 3), true},
		{newReadIndexInfoFileResultForCorruptedInfoFile(blockStarts[1], 4), true},
		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[1], volumeTypeExtra, 5), false},
		{newReadIndexInfoFileResultForCorruptedInfoFile(blockStarts[1], 6), true},
		{newReadIndexInfoFileResultForCorruptedInfoFile(blockStarts[1], 7), false},

		{newReadIndexInfoFileResultForCorruptedInfoFile(blockStarts[2], 0), true},
		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[2], volumeTypeDefault, 1), true},
		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[2], volumeTypeExtra, 2), true},
		{newReadIndexInfoFileResult(blockStarts[2], volumeTypeDefault, 3, shards), false},
		{newReadIndexInfoFileResult(blockStarts[2], volumeTypeExtra, 4, shards), false},

		{newReadIndexInfoFileResult(blockStarts[3], volumeTypeDefault, 0, shards), false},

		{newReadIndexInfoFileResultForCorruptedFileset(blockStarts[4], volumeTypeDefault, 0), false},

		{newReadIndexInfoFileResultForCorruptedInfoFile(blockStarts[5], 0), false},
	}

	var (
		infoFiles         = make([]fs.ReadIndexInfoFileResult, 0)
		expectedFilenames = make([]string, 0)
	)
	for _, f := range filesetsForTest {
		infoFiles = append(infoFiles, f.infoFile)
		if f.shouldRemove {
			expectedFilenames = append(expectedFilenames, f.infoFile.AbsoluteFilePaths...)
		}
	}

	idx.readIndexInfoFilesFn = func(_ fs.ReadIndexInfoFilesOptions) []fs.ReadIndexInfoFileResult {
		return infoFiles
	}

	// Sort both sides before comparing: deletion order is not part of the
	// contract under test.
	deleteFilesFnInvoked := false
	idx.deleteFilesFn = func(s []string) error {
		sort.Strings(s)
		sort.Strings(expectedFilenames)
		require.Equal(t, expectedFilenames, s)
		deleteFilesFnInvoked = true
		return nil
	}
	require.NoError(t, idx.CleanupCorruptedFileSets())
	// Guard against the cleanup silently skipping deletion entirely.
	require.True(t, deleteFilesFnInvoked)
}
   576  
   577  func TestNamespaceIndexFlushSuccess(t *testing.T) {
   578  	ctrl := xtest.NewController(t)
   579  	defer ctrl.Finish()
   580  
   581  	test := newTestIndex(t, ctrl)
   582  
   583  	idx := test.index.(*nsIndex)
   584  
   585  	defer func() {
   586  		require.NoError(t, idx.Close())
   587  	}()
   588  
   589  	verifyFlushForShards(
   590  		t,
   591  		ctrl,
   592  		idx,
   593  		test.blockSize,
   594  		[]uint32{0},
   595  	)
   596  }
   597  
   598  func TestNamespaceIndexFlushSuccessMultipleShards(t *testing.T) {
   599  	ctrl := xtest.NewController(t)
   600  	defer ctrl.Finish()
   601  
   602  	test := newTestIndex(t, ctrl)
   603  
   604  	idx := test.index.(*nsIndex)
   605  
   606  	defer func() {
   607  		require.NoError(t, idx.Close())
   608  	}()
   609  
   610  	verifyFlushForShards(
   611  		t,
   612  		ctrl,
   613  		idx,
   614  		test.blockSize,
   615  		[]uint32{0, 1, 2},
   616  	)
   617  }
   618  
   619  func TestNamespaceIndexFlushShardStateNotSuccess(t *testing.T) {
   620  	ctrl := xtest.NewController(t)
   621  	defer ctrl.Finish()
   622  
   623  	test := newTestIndex(t, ctrl)
   624  
   625  	now := xtime.Now().Truncate(test.indexBlockSize)
   626  	idx := test.index.(*nsIndex)
   627  
   628  	defer func() {
   629  		require.NoError(t, idx.Close())
   630  	}()
   631  
   632  	// NB(bodu): We don't need to allocate a mock block for every block start we just need to
   633  	// ensure that we aren't flushing index data if TSDB is not on disk and a single mock block is sufficient.
   634  	mockBlock := index.NewMockBlock(ctrl)
   635  	mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
   636  	blockTime := now.Add(-2 * test.indexBlockSize)
   637  	mockBlock.EXPECT().StartTime().Return(blockTime).AnyTimes()
   638  	mockBlock.EXPECT().EndTime().Return(blockTime.Add(test.indexBlockSize)).AnyTimes()
   639  	idx.state.blocksByTime[blockTime] = mockBlock
   640  
   641  	mockBlock.EXPECT().IsSealed().Return(true)
   642  	mockBlock.EXPECT().Close().Return(nil)
   643  
   644  	mockShard := NewMockdatabaseShard(ctrl)
   645  	mockShard.EXPECT().IsBootstrapped().Return(true).AnyTimes()
   646  	mockShard.EXPECT().ID().Return(uint32(0)).AnyTimes()
   647  	mockShard.EXPECT().FlushState(gomock.Any()).Return(fileOpState{WarmStatus: warmStatus{
   648  		IndexFlushed: fileOpFailed,
   649  	}}, nil).AnyTimes()
   650  	shards := []databaseShard{mockShard}
   651  
   652  	mockFlush := persist.NewMockIndexFlush(ctrl)
   653  
   654  	err := idx.WarmFlush(mockFlush, shards)
   655  	require.NoError(t, err)
   656  }
   657  
   658  func TestNamespaceIndexQueryNoMatchingBlocks(t *testing.T) {
   659  	ctrl := xtest.NewController(t)
   660  	defer ctrl.Finish()
   661  
   662  	test := newTestIndex(t, ctrl)
   663  
   664  	now := xtime.Now().Truncate(test.indexBlockSize)
   665  	query := index.Query{Query: idx.NewTermQuery([]byte("foo"), []byte("bar"))}
   666  	idx := test.index.(*nsIndex)
   667  
   668  	defer func() {
   669  		require.NoError(t, idx.Close())
   670  	}()
   671  
   672  	mockBlock := index.NewMockBlock(ctrl)
   673  	mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
   674  	blockTime := now.Add(-1 * test.indexBlockSize)
   675  	mockBlock.EXPECT().StartTime().Return(blockTime).AnyTimes()
   676  	mockBlock.EXPECT().EndTime().Return(blockTime.Add(test.indexBlockSize)).AnyTimes()
   677  	mockBlock.EXPECT().Close().Return(nil)
   678  	idx.state.blocksByTime[blockTime] = mockBlock
   679  
   680  	ctx := context.NewBackground()
   681  	defer ctx.Close()
   682  
   683  	start := now.Add(-3 * test.indexBlockSize)
   684  	end := now.Add(-2 * test.indexBlockSize)
   685  	// Query non-overlapping range
   686  	result, err := idx.Query(ctx, query, index.QueryOptions{
   687  		StartInclusive: start,
   688  		EndExclusive:   end,
   689  	})
   690  	require.NoError(t, err)
   691  	assert.True(t, result.Exhaustive)
   692  	assert.Equal(t, 0, result.Results.Size())
   693  
   694  	// Aggregate query on the non-overlapping range
   695  	aggResult, err := idx.AggregateQuery(ctx, query, index.AggregationOptions{
   696  		QueryOptions: index.QueryOptions{
   697  			StartInclusive: start,
   698  			EndExclusive:   end,
   699  		},
   700  	})
   701  	require.NoError(t, err)
   702  	assert.True(t, aggResult.Exhaustive)
   703  	assert.Equal(t, 0, aggResult.Results.Size())
   704  }
   705  
// TestNamespaceIndexQueryTimeout verifies that a query whose Go context
// deadline expires mid-execution surfaces context.DeadlineExceeded: the mock
// block's QueryWithIter blocks until the 1s context times out, and the
// resulting multi-error must contain the deadline error.
func TestNamespaceIndexQueryTimeout(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	test := newTestIndex(t, ctrl)

	now := xtime.Now().Truncate(test.indexBlockSize)
	query := index.Query{Query: idx.NewTermQuery([]byte("foo"), []byte("bar"))}
	// NOTE: local idx shadows the m3ninx idx package from here on.
	idx := test.index.(*nsIndex)

	defer func() {
		require.NoError(t, idx.Close())
	}()

	// Wrap a deadline-bearing std context so the query inherits the timeout.
	stdCtx, cancel := stdctx.WithTimeout(stdctx.Background(), time.Second)
	defer cancel()
	ctx := context.NewWithGoContext(stdCtx)
	defer ctx.Close()

	// Iterator reports not-done twice: once before the blocking call and once
	// when the query loop re-checks after the deadline fires.
	mockIter := index.NewMockQueryIterator(ctrl)
	mockIter.EXPECT().Done().Return(false).Times(2)
	mockIter.EXPECT().Close().Return(nil)

	mockBlock := index.NewMockBlock(ctrl)
	mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
	blockTime := now.Add(-1 * test.indexBlockSize)
	mockBlock.EXPECT().StartTime().Return(blockTime).AnyTimes()
	mockBlock.EXPECT().EndTime().Return(blockTime.Add(test.indexBlockSize)).AnyTimes()
	mockBlock.EXPECT().QueryIter(gomock.Any(), gomock.Any()).Return(mockIter, nil)
	mockBlock.EXPECT().
		QueryWithIter(gomock.Any(), gomock.Any(), mockIter, gomock.Any(), gomock.Any(), gomock.Any()).
		DoAndReturn(func(
			ctx context.Context,
			opts index.QueryOptions,
			iter index.QueryIterator,
			r index.QueryResults,
			deadline time.Time,
			logFields []opentracinglog.Field,
		) error {
			// Block until the context deadline elapses, then propagate its error.
			<-ctx.GoContext().Done()
			return ctx.GoContext().Err()
		})
	mockBlock.EXPECT().Close().Return(nil)
	idx.state.blocksByTime[blockTime] = mockBlock
	idx.updateBlockStartsWithLock()

	start := blockTime
	end := blockTime.Add(test.indexBlockSize)

	// Query non-overlapping range
	_, err := idx.Query(ctx, query, index.QueryOptions{
		StartInclusive: start,
		EndExclusive:   end,
	})
	require.Error(t, err)
	// The error is a multi-error; it must wrap the deadline exceeded error.
	var multiErr xerrors.MultiError
	require.True(t, errors.As(err, &multiErr))
	require.True(t, multiErr.Contains(stdctx.DeadlineExceeded))
}
   765  
   766  func TestNamespaceIndexFlushSkipBootstrappingShards(t *testing.T) {
   767  	ctrl := xtest.NewController(t)
   768  	defer ctrl.Finish()
   769  
   770  	test := newTestIndex(t, ctrl)
   771  
   772  	now := xtime.Now().Truncate(test.indexBlockSize)
   773  	idx := test.index.(*nsIndex)
   774  
   775  	defer func() {
   776  		require.NoError(t, idx.Close())
   777  	}()
   778  
   779  	// NB(bodu): We don't need to allocate a mock block for every block start we just need to
   780  	// ensure that we aren't flushing index data if TSDB is not on disk and a single mock block is sufficient.
   781  	mockBlock := index.NewMockBlock(ctrl)
   782  	mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
   783  	blockTime := now.Add(-2 * test.indexBlockSize)
   784  	mockBlock.EXPECT().StartTime().Return(blockTime).AnyTimes()
   785  	mockBlock.EXPECT().EndTime().Return(blockTime.Add(test.indexBlockSize)).AnyTimes()
   786  	mockBlock.EXPECT().NeedsColdMutableSegmentsEvicted().Return(true).AnyTimes()
   787  	mockBlock.EXPECT().RotateColdMutableSegments().Return(nil).AnyTimes()
   788  	mockBlock.EXPECT().EvictColdMutableSegments().Return(nil).AnyTimes()
   789  	idx.state.blocksByTime[blockTime] = mockBlock
   790  
   791  	mockBlock.EXPECT().Close().Return(nil)
   792  
   793  	shardInfos := []struct {
   794  		id             uint32
   795  		isBootstrapped bool
   796  	}{
   797  		{0, true},
   798  		{1, false},
   799  		{2, true},
   800  		{3, false},
   801  	}
   802  
   803  	shards := make([]databaseShard, 0, len(shardInfos))
   804  	for _, shardInfo := range shardInfos {
   805  		mockShard := NewMockdatabaseShard(ctrl)
   806  		mockShard.EXPECT().IsBootstrapped().Return(shardInfo.isBootstrapped).AnyTimes()
   807  		mockShard.EXPECT().ID().Return(shardInfo.id).AnyTimes()
   808  		if shardInfo.isBootstrapped {
   809  			mockShard.EXPECT().FlushState(gomock.Any()).Return(fileOpState{WarmStatus: warmStatus{
   810  				IndexFlushed: fileOpSuccess,
   811  			}}, nil).AnyTimes()
   812  		}
   813  		shards = append(shards, mockShard)
   814  	}
   815  
   816  	done, err := idx.ColdFlush(shards)
   817  	require.NoError(t, err)
   818  	require.NoError(t, done())
   819  }
   820  
// verifyFlushForShards drives a warm index flush (idx.WarmFlush) over the
// given shards and asserts that every sealed, flushable index block is
// persisted exactly once with the exact expected set of documents.
func verifyFlushForShards(
	t *testing.T,
	ctrl *gomock.Controller,
	idx *nsIndex,
	blockSize time.Duration,
	shards []uint32,
) {
	var (
		mockFlush          = persist.NewMockIndexFlush(ctrl)
		shardMap           = make(map[uint32]struct{})
		now                = xtime.Now()
		warmBlockStart     = now.Add(-idx.bufferPast).Truncate(idx.blockSize)
		mockShards         []*MockdatabaseShard
		dbShards           []databaseShard
		numBlocks          int
		persistClosedTimes int
		persistCalledTimes int
		actualDocs         = make([]doc.Metadata, 0)
		expectedDocs       = make([]doc.Metadata, 0)
	)
	// NB(bodu): Always align now w/ the index's view of now.
	idx.nowFn = func() time.Time {
		return now.ToTime()
	}
	// Every mock shard reports ID 0, so the PrepareIndex expectation below
	// always sees the shard set {0}; shardMap records the requested IDs.
	for _, shard := range shards {
		mockShard := NewMockdatabaseShard(ctrl)
		mockShard.EXPECT().ID().Return(uint32(0)).AnyTimes()
		mockShards = append(mockShards, mockShard)
		shardMap[shard] = struct{}{}
		dbShards = append(dbShards, mockShard)
	}
	// Walk each index block from the earliest retained blockStart up to (but
	// excluding) the block still inside bufferPast, which must not warm flush.
	earliestBlockStartToRetain := retention.FlushTimeStartForRetentionPeriod(idx.retentionPeriod, idx.blockSize, now)
	for blockStart := earliestBlockStartToRetain; blockStart.Before(warmBlockStart); blockStart = blockStart.Add(idx.blockSize) {
		numBlocks++

		mockBlock := index.NewMockBlock(ctrl)
		mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
		mockBlock.EXPECT().StartTime().Return(blockStart).AnyTimes()
		mockBlock.EXPECT().EndTime().Return(blockStart.Add(idx.blockSize)).AnyTimes()
		idx.state.blocksByTime[blockStart] = mockBlock

		mockBlock.EXPECT().Close().Return(nil)

		// Count persist lifecycle calls and capture persisted docs so we can
		// assert one persist+close per block and the exact doc set at the end.
		closer := func() ([]segment.Segment, error) {
			persistClosedTimes++
			return nil, nil
		}
		persistFn := func(b segment.Builder) error {
			persistCalledTimes++
			actualDocs = append(actualDocs, b.Docs()...)
			return nil
		}
		preparedPersist := persist.PreparedIndexPersist{
			Close:   closer,
			Persist: persistFn,
		}
		mockFlush.EXPECT().PrepareIndex(xtest.CmpMatcher(persist.IndexPrepareOptions{
			NamespaceMetadata: idx.nsMetadata,
			BlockStart:        blockStart,
			FileSetType:       persist.FileSetFlushType,
			Shards:            map[uint32]struct{}{0: {}},
			IndexVolumeType:   idxpersist.DefaultIndexVolumeType,
		})).Return(preparedPersist, nil)

		results := block.NewMockFetchBlocksMetadataResults(ctrl)

		// Two series per shard per block: "CACHED" is resolved through the
		// shard's DocRef cache, "NEW" misses the cache (DocRef returns false)
		// and must be rebuilt from its tags iterator.
		resultsID1 := ident.StringID("CACHED")
		resultsID2 := ident.StringID("NEW")
		doc1 := doc.Metadata{
			ID:     resultsID1.Bytes(),
			Fields: []doc.Field{},
		}
		doc2 := doc.Metadata{
			ID:     resultsID2.Bytes(),
			Fields: []doc.Field{},
		}
		expectedDocs = append(expectedDocs, doc1)
		expectedDocs = append(expectedDocs, doc2)

		for _, mockShard := range mockShards {
			mockShard.EXPECT().IsBootstrapped().Return(true)
			mockShard.EXPECT().FlushState(blockStart).Return(fileOpState{WarmStatus: warmStatus{
				// Index flushing requires data flush already happened.
				DataFlushed: fileOpSuccess,
			}}, nil)
			mockShard.EXPECT().FlushState(blockStart.Add(blockSize)).Return(fileOpState{WarmStatus: warmStatus{
				// Index flushing requires data flush already happened.
				DataFlushed: fileOpSuccess,
			}}, nil)

			resultsTags1 := ident.NewTagsIterator(ident.NewTags())
			resultsTags2 := ident.NewTagsIterator(ident.NewTags())
			resultsInShard := []block.FetchBlocksMetadataResult{
				{
					ID:   resultsID1,
					Tags: resultsTags1,
				},
				{
					ID:   resultsID2,
					Tags: resultsTags2,
				},
			}
			results.EXPECT().Results().Return(resultsInShard)
			results.EXPECT().Close()

			mockShard.EXPECT().DocRef(resultsID1).Return(doc1, true, nil)
			mockShard.EXPECT().DocRef(resultsID2).Return(doc.Metadata{}, false, nil)

			mockShard.EXPECT().FetchBlocksMetadataV2(gomock.Any(), blockStart, blockStart.Add(idx.blockSize),
				gomock.Any(), gomock.Any(), block.FetchBlocksMetadataOptions{OnlyDisk: true}).Return(results, nil, nil)

			// For a given index block, which in this test is 2x the size of a block, we expect that
			// we mark as flushed 2 blockStarts that fall within the index block.
			mockShard.EXPECT().MarkWarmIndexFlushStateSuccessOrError(blockStart, nil)
			mockShard.EXPECT().MarkWarmIndexFlushStateSuccessOrError(blockStart.Add(blockSize), nil)
		}

		// A block is only flushable once sealed; after persisting, the flushed
		// segments are attached via AddResults and mutable segments evicted.
		mockBlock.EXPECT().IsSealed().Return(true)
		mockBlock.EXPECT().AddResults(gomock.Any()).Return(nil)
		mockBlock.EXPECT().EvictMutableSegments().Return(nil)
	}
	err := idx.WarmFlush(mockFlush, dbShards)
	require.NoError(t, err)
	// Exactly one persist+close per eligible block, and exactly the expected docs.
	require.Equal(t, numBlocks, persistClosedTimes)
	require.Equal(t, numBlocks, persistCalledTimes)
	require.Equal(t, expectedDocs, actualDocs)
}
   948  
   949  func newReadIndexInfoFileResult(
   950  	blockStart xtime.UnixNano,
   951  	volumeType string,
   952  	volumeIndex int,
   953  	shards []uint32,
   954  ) fs.ReadIndexInfoFileResult {
   955  	filenames := []string{
   956  		// TODO: this may be an error/
   957  		fmt.Sprintf("fileset-%v-%v-segement-1.db", blockStart, volumeIndex),
   958  		fmt.Sprintf("fileset-%v-%v-segement-2.db", blockStart, volumeIndex),
   959  	}
   960  	return fs.ReadIndexInfoFileResult{
   961  		ID: fs.FileSetFileIdentifier{
   962  			BlockStart:  blockStart,
   963  			VolumeIndex: volumeIndex,
   964  		},
   965  		Info: indexpb.IndexVolumeInfo{
   966  			BlockStart: int64(blockStart),
   967  			BlockSize:  int64(2 * time.Hour),
   968  			Shards:     shards,
   969  			IndexVolumeType: &protobuftypes.StringValue{
   970  				Value: volumeType,
   971  			},
   972  		},
   973  		AbsoluteFilePaths: filenames,
   974  		Corrupted:         false,
   975  	}
   976  }
   977  
   978  func newReadIndexInfoFileResultForCorruptedFileset(
   979  	blockStart xtime.UnixNano,
   980  	volumeType string,
   981  	volumeIndex int,
   982  ) fs.ReadIndexInfoFileResult {
   983  	res := newReadIndexInfoFileResult(blockStart, volumeType, volumeIndex, []uint32{})
   984  	res.Corrupted = true
   985  	return res
   986  }
   987  
   988  func newReadIndexInfoFileResultForCorruptedInfoFile(
   989  	blockStart xtime.UnixNano,
   990  	volumeIndex int,
   991  ) fs.ReadIndexInfoFileResult {
   992  	res := newReadIndexInfoFileResultForCorruptedFileset(blockStart, "", volumeIndex)
   993  	res.Info = indexpb.IndexVolumeInfo{}
   994  	return res
   995  }
   996  
// testIndex bundles a namespace index with the configuration values it was
// constructed from, so tests can assert against the same sizes/retention
// that newTestIndex used.
type testIndex struct {
	index          NamespaceIndex     // the index under test
	metadata       namespace.Metadata // namespace metadata the index was built with
	opts           Options            // storage options used for construction
	blockSize      time.Duration      // data block size
	indexBlockSize time.Duration      // index block size (2x data block size in newTestIndex)
	retention      time.Duration      // retention period
}
  1005  
  1006  func newTestIndex(t *testing.T, ctrl *gomock.Controller) testIndex {
  1007  	blockSize := time.Hour
  1008  	indexBlockSize := 2 * time.Hour
  1009  	retentionPeriod := 24 * time.Hour
  1010  	ropts := retention.NewOptions().
  1011  		SetBlockSize(blockSize).
  1012  		SetRetentionPeriod(retentionPeriod).
  1013  		SetBufferPast(blockSize / 2)
  1014  	nopts := namespace.NewOptions().
  1015  		SetRetentionOptions(ropts).
  1016  		SetIndexOptions(namespace.NewIndexOptions().SetBlockSize(indexBlockSize))
  1017  	md, err := namespace.NewMetadata(ident.StringID("testns"), nopts)
  1018  	require.NoError(t, err)
  1019  	opts := DefaultTestOptions()
  1020  	index, err := newNamespaceIndex(md,
  1021  		namespace.NewRuntimeOptionsManager(md.ID().String()),
  1022  		testShardSet, opts)
  1023  	require.NoError(t, err)
  1024  
  1025  	return testIndex{
  1026  		index:          index,
  1027  		metadata:       md,
  1028  		opts:           opts,
  1029  		blockSize:      blockSize,
  1030  		indexBlockSize: indexBlockSize,
  1031  		retention:      retentionPeriod,
  1032  	}
  1033  }