github.com/m3db/m3@v1.5.0/src/dbnode/storage/shard_test.go

     1  // Copyright (c) 2016 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package storage
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"io/ioutil"
    27  	"os"
    28  	"strconv"
    29  	"sync"
    30  	"sync/atomic"
    31  	"testing"
    32  	"time"
    33  	"unsafe"
    34  
    35  	"github.com/golang/mock/gomock"
    36  	"github.com/stretchr/testify/assert"
    37  	"github.com/stretchr/testify/require"
    38  	"github.com/uber-go/tally"
    39  
    40  	"github.com/m3db/m3/src/dbnode/encoding"
    41  	"github.com/m3db/m3/src/dbnode/namespace"
    42  	"github.com/m3db/m3/src/dbnode/persist"
    43  	"github.com/m3db/m3/src/dbnode/persist/fs"
    44  	"github.com/m3db/m3/src/dbnode/retention"
    45  	"github.com/m3db/m3/src/dbnode/runtime"
    46  	"github.com/m3db/m3/src/dbnode/storage/block"
    47  	"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
    48  	"github.com/m3db/m3/src/dbnode/storage/index/convert"
    49  	"github.com/m3db/m3/src/dbnode/storage/series"
    50  	"github.com/m3db/m3/src/dbnode/ts"
    51  	xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
    52  	"github.com/m3db/m3/src/dbnode/x/xio"
    53  	"github.com/m3db/m3/src/m3ninx/doc"
    54  	"github.com/m3db/m3/src/x/checked"
    55  	"github.com/m3db/m3/src/x/context"
    56  	"github.com/m3db/m3/src/x/ident"
    57  	"github.com/m3db/m3/src/x/pool"
    58  	xtest "github.com/m3db/m3/src/x/test"
    59  	xtime "github.com/m3db/m3/src/x/time"
    60  )
    61  
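         // testIncreasingIndex hands out a monotonically increasing unique index
         // for each new series entry created in tests.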
    62  type testIncreasingIndex struct {
    63  	created uint64
    64  }
    65  
    66  func (i *testIncreasingIndex) nextIndex() uint64 {
    67  	created := atomic.AddUint64(&i.created, 1)
    68  	return created - 1
    69  }
    70  
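         // testDatabaseShard constructs a dbShard for tests with no namespace
         // index and cold writes disabled.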
    71  func testDatabaseShard(t *testing.T, opts Options) *dbShard {
    72  	return testDatabaseShardWithIndexFn(t, opts, nil, false)
    73  }
    74  
    75  func testDatabaseShardWithIndexFn(
    76  	t *testing.T,
    77  	opts Options,
    78  	idx NamespaceIndex,
    79  	coldWritesEnabled bool,
    80  ) *dbShard {
    81  	metadata, err := namespace.NewMetadata(
    82  		defaultTestNs1ID,
    83  		defaultTestNs1Opts.SetColdWritesEnabled(coldWritesEnabled),
    84  	)
    85  	require.NoError(t, err)
    86  	nsReaderMgr := newNamespaceReaderManager(metadata, tally.NoopScope, opts)
    87  
    88  	seriesOpts := NewSeriesOptionsFromOptions(opts, defaultTestNs1Opts.RetentionOptions()).
    89  		SetBufferBucketVersionsPool(series.NewBufferBucketVersionsPool(nil)).
    90  		SetBufferBucketPool(series.NewBufferBucketPool(nil)).
    91  		SetColdWritesEnabled(coldWritesEnabled)
    92  
    93  	return newDatabaseShard(metadata, 0, nil, nsReaderMgr,
    94  		&testIncreasingIndex{}, idx, true, opts, seriesOpts).(*dbShard)
    95  }
    96  
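         // addMockSeries inserts a mock series with the given id and unique index
         // into the shard so tests can set expectations on it.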
    97  func addMockSeries(ctrl *gomock.Controller, shard *dbShard, id ident.ID, tags ident.Tags, index uint64) *series.MockDatabaseSeries {
     98  	mockSeries := series.NewMockDatabaseSeries(ctrl)
     99  	mockSeries.EXPECT().ID().Return(id).AnyTimes()
    100  	mockSeries.EXPECT().IsEmpty().Return(false).AnyTimes()
    101  	shard.Lock()
    102  	shard.insertNewShardEntryWithLock(NewEntry(NewEntryOptions{
    103  		Series: mockSeries,
    104  		Index:  index,
    105  	}))
    106  	shard.Unlock()
    107  	return mockSeries
   108  }
   109  
   110  func TestShardDontNeedBootstrap(t *testing.T) {
   111  	opts := DefaultTestOptions()
   112  	testNs, closer := newTestNamespace(t)
   113  	defer closer()
   114  	seriesOpts := NewSeriesOptionsFromOptions(opts, testNs.Options().RetentionOptions())
   115  	shard := newDatabaseShard(testNs.metadata, 0, nil, nil,
   116  		&testIncreasingIndex{}, nil, false, opts, seriesOpts).(*dbShard)
   117  	defer shard.Close()
   118  
   119  	require.Equal(t, Bootstrapped, shard.bootstrapState)
   120  	require.True(t, shard.IsBootstrapped())
   121  }
   122  
    123  func TestShardErrorIfDoubleBootstrap(t *testing.T) {
    124  	opts := DefaultTestOptions()
    125  	testNs, closer := newTestNamespace(t)
    126  	defer closer()
    127  	seriesOpts := NewSeriesOptionsFromOptions(opts, testNs.Options().RetentionOptions())
    128  	shard := newDatabaseShard(testNs.metadata, 0, nil, nil,
    129  		&testIncreasingIndex{}, nil, false, opts, seriesOpts).(*dbShard)
    130  	defer shard.Close()
    131  
    132  	require.Equal(t, Bootstrapped, shard.bootstrapState)
    133  	require.True(t, shard.IsBootstrapped())

         	// The shard is constructed already bootstrapped (needsBootstrap=false),
         	// so an explicit second bootstrap attempt must return an error.
         	ctx := context.NewBackground()
         	defer ctx.Close()
         	require.Error(t, shard.Bootstrap(ctx, namespace.Context{ID: ident.StringID("foo")}))
    134  }
   135  
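         // TestShardBootstrapState verifies that a shard bootstraps exactly once
         // and that a second Bootstrap call returns an error.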
   136  func TestShardBootstrapState(t *testing.T) {
   137  	opts := DefaultTestOptions()
   138  	s := testDatabaseShard(t, opts)
   139  	defer s.Close()
   140  
   141  	ctx := context.NewBackground()
   142  	defer ctx.Close()
   143  
   144  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   145  	require.NoError(t, s.Bootstrap(ctx, nsCtx))
   146  	require.Error(t, s.Bootstrap(ctx, nsCtx))
   147  }
   148  
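         // TestShardFlushStateNotStarted verifies that, before any flush has run,
         // every block in the retention window reports fileOpNotStarted.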
   149  func TestShardFlushStateNotStarted(t *testing.T) {
   150  	dir, err := ioutil.TempDir("", "testdir")
   151  	require.NoError(t, err)
   152  	defer os.RemoveAll(dir)
   153  
   154  	now := xtime.Now()
   155  	nowFn := func() time.Time {
   156  		return now.ToTime()
   157  	}
   158  
   159  	opts := DefaultTestOptions()
   160  	fsOpts := opts.CommitLogOptions().FilesystemOptions().
   161  		SetFilePathPrefix(dir)
   162  	opts = opts.
   163  		SetClockOptions(opts.ClockOptions().SetNowFn(nowFn)).
   164  		SetCommitLogOptions(opts.CommitLogOptions().
   165  			SetFilesystemOptions(fsOpts))
   166  
   167  	ropts := defaultTestRetentionOpts
   168  	earliest, latest := retention.FlushTimeStart(ropts, now), retention.FlushTimeEnd(ropts, now)
   169  
   170  	s := testDatabaseShard(t, opts)
   171  	defer s.Close()
   172  
   173  	ctx := context.NewBackground()
   174  	defer ctx.Close()
   175  
   176  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
    177  	require.NoError(t, s.Bootstrap(ctx, nsCtx))
   178  
   179  	notStarted := fileOpState{WarmStatus: warmStatus{
   180  		DataFlushed: fileOpNotStarted,
   181  	}}
   182  	for st := earliest; !st.After(latest); st = st.Add(ropts.BlockSize()) {
    183  		flushState, err := s.FlushState(st)
   184  		require.NoError(t, err)
   185  		require.Equal(t, notStarted, flushState)
   186  	}
   187  }
   188  
   189  // TestShardBootstrapWithFlushVersion ensures that the shard is able to bootstrap
   190  // the cold flush version from the info files.
   191  func TestShardBootstrapWithFlushVersion(t *testing.T) {
   192  	dir, err := ioutil.TempDir("", "testdir")
   193  	require.NoError(t, err)
   194  	defer os.RemoveAll(dir)
   195  
   196  	ctrl := xtest.NewController(t)
   197  	defer ctrl.Finish()
   198  
   199  	var (
   200  		opts   = DefaultTestOptions()
   201  		fsOpts = opts.CommitLogOptions().FilesystemOptions().
   202  			SetFilePathPrefix(dir)
   203  		newClOpts = opts.CommitLogOptions().SetFilesystemOptions(fsOpts)
   204  	)
   205  	opts = opts.
   206  		SetCommitLogOptions(newClOpts)
   207  
   208  	s := testDatabaseShard(t, opts)
   209  	defer s.Close()
   210  
   211  	mockSeriesID := ident.StringID("series-1")
   212  	mockSeries := series.NewMockDatabaseSeries(ctrl)
   213  	mockSeries.EXPECT().ID().Return(mockSeriesID).AnyTimes()
   214  	mockSeries.EXPECT().IsEmpty().Return(false).AnyTimes()
   215  	mockSeries.EXPECT().Bootstrap(gomock.Any())
   216  
   217  	// Load the mock into the shard as an expected series so that we can assert
   218  	// on the call to its Bootstrap() method below.
   219  	entry := NewEntry(NewEntryOptions{
   220  		Series: mockSeries,
   221  	})
   222  	s.Lock()
   223  	s.insertNewShardEntryWithLock(entry)
   224  	s.Unlock()
   225  
   226  	writer, err := fs.NewWriter(fsOpts)
   227  	require.NoError(t, err)
   228  
   229  	var (
   230  		blockSize   = 2 * time.Hour
   231  		start       = xtime.Now().Truncate(blockSize)
   232  		blockStarts = []xtime.UnixNano{start, start.Add(blockSize)}
   233  	)
   234  	for i, blockStart := range blockStarts {
    235  		require.NoError(t, writer.Open(fs.DataWriterOpenOptions{
    236  			FileSetType: persist.FileSetFlushType,
    237  			Identifier: fs.FileSetFileIdentifier{
    238  				Namespace:   defaultTestNs1ID,
    239  				Shard:       s.ID(),
    240  				BlockStart:  blockStart,
    241  				VolumeIndex: i,
    242  			},
    243  		}))
   244  		require.NoError(t, writer.Close())
   245  	}
   246  
   247  	ctx := context.NewBackground()
   248  	defer ctx.Close()
   249  
   250  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   251  	err = s.Bootstrap(ctx, nsCtx)
   252  	require.NoError(t, err)
   253  
   254  	require.Equal(t, Bootstrapped, s.bootstrapState)
   255  
   256  	for i, blockStart := range blockStarts {
   257  		flushState, err := s.FlushState(blockStart)
   258  		require.NoError(t, err)
   259  		require.Equal(t, i, flushState.ColdVersionFlushed)
   260  	}
   261  }
   262  
   263  // TestShardBootstrapWithFlushVersionNoCleanUp ensures that the shard is able to
   264  // bootstrap the cold flush version from the info files even if the DB stopped
    265  // before it was able to clean up its files. For example, if the DB had volume 0,
   266  // did a cold flush producing volume 1, then terminated before cleaning up the
   267  // files from volume 0, the flush version for that block should be bootstrapped
   268  // to 1.
   269  func TestShardBootstrapWithFlushVersionNoCleanUp(t *testing.T) {
   270  	dir, err := ioutil.TempDir("", "testdir")
   271  	require.NoError(t, err)
   272  	defer os.RemoveAll(dir)
   273  
   274  	ctrl := xtest.NewController(t)
   275  	defer ctrl.Finish()
   276  
   277  	var (
   278  		opts      = DefaultTestOptions()
   279  		fsOpts    = opts.CommitLogOptions().FilesystemOptions().SetFilePathPrefix(dir)
   280  		newClOpts = opts.CommitLogOptions().SetFilesystemOptions(fsOpts)
   281  	)
   282  	opts = opts.
   283  		SetCommitLogOptions(newClOpts)
   284  
   285  	s := testDatabaseShard(t, opts)
   286  	defer s.Close()
   287  
   288  	writer, err := fs.NewWriter(fsOpts)
   289  	require.NoError(t, err)
   290  
   291  	var (
   292  		blockSize  = 2 * time.Hour
   293  		start      = xtime.Now().Truncate(blockSize)
   294  		numVolumes = 3
   295  	)
   296  	for i := 0; i < numVolumes; i++ {
    297  		require.NoError(t, writer.Open(fs.DataWriterOpenOptions{
    298  			FileSetType: persist.FileSetFlushType,
    299  			Identifier: fs.FileSetFileIdentifier{
    300  				Namespace:   defaultTestNs1ID,
    301  				Shard:       s.ID(),
    302  				BlockStart:  start,
    303  				VolumeIndex: i,
    304  			},
    305  		}))
   306  		require.NoError(t, writer.Close())
   307  	}
   308  
   309  	ctx := context.NewBackground()
   310  	defer ctx.Close()
   311  
   312  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   313  	err = s.Bootstrap(ctx, nsCtx)
   314  	require.NoError(t, err)
   315  	require.Equal(t, Bootstrapped, s.bootstrapState)
   316  
   317  	flushState, err := s.FlushState(start)
   318  	require.NoError(t, err)
   319  	require.Equal(t, numVolumes-1, flushState.ColdVersionFlushed)
   320  }
   321  
   322  // TestShardBootstrapWithCacheShardIndices ensures that the shard is able to bootstrap
   323  // and call CacheShardIndices if a BlockRetrieverManager is present.
   324  func TestShardBootstrapWithCacheShardIndices(t *testing.T) {
   325  	dir, err := ioutil.TempDir("", "testdir")
   326  	require.NoError(t, err)
   327  	defer os.RemoveAll(dir)
   328  
   329  	ctrl := xtest.NewController(t)
   330  	defer ctrl.Finish()
   331  
   332  	var (
   333  		opts          = DefaultTestOptions()
   334  		fsOpts        = opts.CommitLogOptions().FilesystemOptions().SetFilePathPrefix(dir)
   335  		newClOpts     = opts.CommitLogOptions().SetFilesystemOptions(fsOpts)
   336  		mockRetriever = block.NewMockDatabaseBlockRetriever(ctrl)
   337  	)
   338  	opts = opts.SetCommitLogOptions(newClOpts)
   339  
   340  	s := testDatabaseShard(t, opts)
   341  	defer s.Close()
   342  	mockRetriever.EXPECT().CacheShardIndices([]uint32{s.ID()}).Return(nil)
   343  	s.setBlockRetriever(mockRetriever)
   344  
   345  	ctx := context.NewBackground()
   346  	defer ctx.Close()
   347  
   348  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   349  	err = s.Bootstrap(ctx, nsCtx)
   350  	require.NoError(t, err)
   351  	require.Equal(t, Bootstrapped, s.bootstrapState)
   352  }
   353  
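         // TestShardFlushDuringBootstrap verifies that warm flushes are rejected
         // while the shard is still bootstrapping.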
   354  func TestShardFlushDuringBootstrap(t *testing.T) {
   355  	s := testDatabaseShard(t, DefaultTestOptions())
   356  	defer s.Close()
   357  	s.bootstrapState = Bootstrapping
   358  	err := s.WarmFlush(xtime.Now(), nil, namespace.Context{})
    359  	require.Equal(t, errShardNotBootstrappedToFlush, err)
   360  }
   361  
   362  func TestShardLoadLimitEnforcedIfSet(t *testing.T) {
   363  	testShardLoadLimit(t, 1, true)
   364  }
   365  
   366  func TestShardLoadLimitNotEnforcedIfNotSet(t *testing.T) {
   367  	testShardLoadLimit(t, 0, false)
   368  }
   369  
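         // testShardLoadLimit loads the same blocks twice and verifies whether the
         // memory tracker load limit is enforced on the second load.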
   370  func testShardLoadLimit(t *testing.T, limit int64, shouldReturnError bool) {
   371  	var (
   372  		memTrackerOptions = NewMemoryTrackerOptions(limit)
   373  		memTracker        = NewMemoryTracker(memTrackerOptions)
   374  		opts              = DefaultTestOptions().SetMemoryTracker(memTracker)
   375  		s                 = testDatabaseShard(t, opts)
   376  		blOpts            = opts.DatabaseBlockOptions()
   377  		testBlockSize     = 2 * time.Hour
   378  		start             = xtime.Now().Truncate(testBlockSize)
   379  		threeBytes        = checked.NewBytes([]byte("123"), nil)
   380  
   381  		sr      = result.NewShardResult(result.NewOptions())
   382  		fooTags = ident.NewTags(ident.StringTag("foo", "foe"))
   383  		barTags = ident.NewTags(ident.StringTag("bar", "baz"))
   384  	)
   385  	defer s.Close()
   386  	threeBytes.IncRef()
   387  	blocks := []block.DatabaseBlock{
   388  		block.NewDatabaseBlock(
   389  			start, testBlockSize, ts.Segment{Head: threeBytes},
   390  			blOpts, namespace.Context{},
   391  		),
   392  		block.NewDatabaseBlock(
   393  			start.Add(1*testBlockSize), testBlockSize, ts.Segment{Tail: threeBytes},
   394  			blOpts, namespace.Context{},
   395  		),
   396  	}
   397  
   398  	sr.AddBlock(ident.StringID("foo"), fooTags, blocks[0])
   399  	sr.AddBlock(ident.StringID("bar"), barTags, blocks[1])
   400  
   401  	seriesMap := sr.AllSeries()
   402  
   403  	ctx := context.NewBackground()
   404  	defer ctx.Close()
   405  
   406  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   407  	require.NoError(t, s.Bootstrap(ctx, nsCtx))
   408  
   409  	// First load will never trigger the limit.
   410  	require.NoError(t, s.LoadBlocks(seriesMap))
   411  
   412  	if shouldReturnError {
   413  		require.Error(t, s.LoadBlocks(seriesMap))
   414  	} else {
   415  		require.NoError(t, s.LoadBlocks(seriesMap))
   416  	}
   417  }
   418  
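         // TestShardFlushSeriesFlushError verifies that a series flush error is
         // propagated by WarmFlush and recorded as fileOpFailed in the flush state.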
   419  func TestShardFlushSeriesFlushError(t *testing.T) {
   420  	ctrl := xtest.NewController(t)
   421  	defer ctrl.Finish()
   422  
   423  	blockStart := xtime.FromSeconds(21600)
   424  
   425  	s := testDatabaseShard(t, DefaultTestOptions())
   426  	defer s.Close()
   427  
   428  	ctx := context.NewBackground()
   429  	defer ctx.Close()
   430  
   431  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
    432  	require.NoError(t, s.Bootstrap(ctx, nsCtx))
   433  
   434  	s.flushState.statesByTime[blockStart] = fileOpState{
   435  		WarmStatus: warmStatus{
   436  			DataFlushed: fileOpNotStarted,
   437  		},
   438  		NumFailures: 0,
   439  	}
   440  
   441  	var closed bool
   442  	flush := persist.NewMockFlushPreparer(ctrl)
   443  	prepared := persist.PreparedDataPersist{
   444  		Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil },
   445  		Close:   func() error { closed = true; return nil },
   446  	}
   447  	prepareOpts := xtest.CmpMatcher(persist.DataPrepareOptions{
   448  		NamespaceMetadata: s.namespace,
   449  		Shard:             s.shard,
   450  		BlockStart:        blockStart,
   451  	})
   452  	flush.EXPECT().PrepareData(prepareOpts).Return(prepared, nil)
   453  
   454  	flushed := make(map[int]struct{})
   455  	for i := 0; i < 2; i++ {
   456  		i := i
   457  		var expectedErr error
   458  		if i == 1 {
   459  			expectedErr = errors.New("error bar")
   460  		}
   461  		curr := series.NewMockDatabaseSeries(ctrl)
   462  		curr.EXPECT().ID().Return(ident.StringID("foo" + strconv.Itoa(i))).AnyTimes()
   463  		curr.EXPECT().IsEmpty().Return(false).AnyTimes()
   464  		curr.EXPECT().
   465  			WarmFlush(gomock.Any(), blockStart, gomock.Any(), gomock.Any()).
   466  			Do(func(context.Context, xtime.UnixNano, persist.DataFn, namespace.Context) {
   467  				flushed[i] = struct{}{}
   468  			}).
   469  			Return(series.FlushOutcomeErr, expectedErr)
   470  		s.list.PushBack(NewEntry(NewEntryOptions{
   471  			Series: curr,
   472  		}))
   473  	}
   474  
   475  	flushErr := s.WarmFlush(blockStart, flush, namespace.Context{})
   476  
    477  	require.Equal(t, 2, len(flushed))
   478  	for i := 0; i < 2; i++ {
   479  		_, ok := flushed[i]
   480  		require.True(t, ok)
   481  	}
   482  
   483  	require.True(t, closed)
   484  	require.NotNil(t, flushErr)
   485  	require.Equal(t, "error bar", flushErr.Error())
   486  
   487  	flushState, err := s.FlushState(blockStart)
   488  	require.NoError(t, err)
   489  	require.Equal(t, fileOpState{
   490  		WarmStatus: warmStatus{
   491  			DataFlushed: fileOpFailed,
   492  		},
   493  		NumFailures: 1,
   494  	}, flushState)
   495  }
   496  
   497  func TestShardFlushSeriesFlushSuccess(t *testing.T) {
   498  	ctrl := xtest.NewController(t)
   499  	defer ctrl.Finish()
   500  
   501  	blockStart := xtime.FromSeconds(21600)
   502  	now := xtime.Now()
   503  	nowFn := func() time.Time {
   504  		return now.ToTime()
   505  	}
   506  	opts := DefaultTestOptions()
   507  	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(nowFn))
   508  
   509  	s := testDatabaseShard(t, opts)
   510  	defer s.Close()
   511  
   512  	ctx := context.NewBackground()
   513  	defer ctx.Close()
   514  
   515  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
    516  	require.NoError(t, s.Bootstrap(ctx, nsCtx))
   517  
   518  	s.flushState.statesByTime[blockStart] = fileOpState{
   519  		WarmStatus: warmStatus{
   520  			DataFlushed: fileOpNotStarted,
   521  		},
   522  		NumFailures: 0,
   523  	}
   524  
   525  	var closed bool
   526  	flush := persist.NewMockFlushPreparer(ctrl)
   527  	prepared := persist.PreparedDataPersist{
   528  		Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil },
   529  		Close:   func() error { closed = true; return nil },
   530  	}
   531  
   532  	prepareOpts := xtest.CmpMatcher(persist.DataPrepareOptions{
   533  		NamespaceMetadata: s.namespace,
   534  		Shard:             s.shard,
   535  		BlockStart:        blockStart,
   536  	})
   537  	flush.EXPECT().PrepareData(prepareOpts).Return(prepared, nil)
   538  
   539  	flushed := make(map[int]struct{})
   540  	for i := 0; i < 2; i++ {
   541  		i := i
   542  		curr := series.NewMockDatabaseSeries(ctrl)
   543  		curr.EXPECT().ID().Return(ident.StringID("foo" + strconv.Itoa(i))).AnyTimes()
   544  		curr.EXPECT().IsEmpty().Return(false).AnyTimes()
   545  		curr.EXPECT().
   546  			WarmFlush(gomock.Any(), blockStart, gomock.Any(), gomock.Any()).
   547  			Do(func(context.Context, xtime.UnixNano, persist.DataFn, namespace.Context) {
   548  				flushed[i] = struct{}{}
   549  			}).
   550  			Return(series.FlushOutcomeFlushedToDisk, nil)
   551  		s.list.PushBack(NewEntry(NewEntryOptions{
   552  			Series: curr,
   553  		}))
   554  	}
   555  
   556  	err := s.WarmFlush(blockStart, flush, namespace.Context{})
   557  
    558  	require.Equal(t, 2, len(flushed))
   559  	for i := 0; i < 2; i++ {
   560  		_, ok := flushed[i]
   561  		require.True(t, ok)
   562  	}
   563  
   564  	require.True(t, closed)
    565  	require.NoError(t, err)
   566  
    567  	// WarmFlush marks the warm data flush state itself, so DataFlushed is already fileOpSuccess here; the index flush status is only updated by a separate explicit mark call.
   568  	flushState, err := s.FlushState(blockStart)
   569  	require.NoError(t, err)
   570  	require.Equal(t, fileOpState{
   571  		WarmStatus: warmStatus{
   572  			DataFlushed: fileOpSuccess,
   573  		},
   574  		ColdVersionRetrievable: 0,
   575  		NumFailures:            0,
   576  	}, flushState)
   577  }
   578  
   579  type testDirtySeries struct {
   580  	id         ident.ID
   581  	dirtyTimes []xtime.UnixNano
   582  }
   583  
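         // optimizedTimesFromTimes converts a slice of block starts into the
         // series.OptimizedTimes representation returned by ColdFlushBlockStarts.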
   584  func optimizedTimesFromTimes(times []xtime.UnixNano) series.OptimizedTimes {
   585  	var ret series.OptimizedTimes
   586  	for _, t := range times {
   587  		ret.Add(t)
   588  	}
   589  	return ret
   590  }
   591  
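         // TestShardColdFlush verifies that dirty blocks are cold flushed and have
         // their cold versions bumped, but only for blocks that were previously
         // warm flushed.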
   592  func TestShardColdFlush(t *testing.T) {
   593  	dir, err := ioutil.TempDir("", "testdir")
   594  	require.NoError(t, err)
   595  	defer os.RemoveAll(dir)
   596  
   597  	ctrl := xtest.NewController(t)
   598  	defer ctrl.Finish()
   599  	now := xtime.Now()
   600  	nowFn := func() time.Time {
   601  		return now.ToTime()
   602  	}
   603  	opts := DefaultTestOptions()
   604  	fsOpts := opts.CommitLogOptions().FilesystemOptions().
   605  		SetFilePathPrefix(dir)
   606  	opts = opts.
   607  		SetClockOptions(opts.ClockOptions().SetNowFn(nowFn)).
   608  		SetCommitLogOptions(opts.CommitLogOptions().
   609  			SetFilesystemOptions(fsOpts))
   610  
   611  	blockSize := opts.SeriesOptions().RetentionOptions().BlockSize()
   612  	shard := testDatabaseShard(t, opts)
   613  
   614  	ctx := context.NewBackground()
   615  	defer ctx.Close()
   616  
   617  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   618  	require.NoError(t, shard.Bootstrap(ctx, nsCtx))
   619  	shard.newMergerFn = newMergerTestFn
   620  	shard.newFSMergeWithMemFn = newFSMergeWithMemTestFn
   621  
   622  	t0 := now.Truncate(blockSize).Add(-10 * blockSize)
   623  	t1 := t0.Add(1 * blockSize)
   624  	t2 := t0.Add(2 * blockSize)
   625  	t3 := t0.Add(3 * blockSize)
   626  	t4 := t0.Add(4 * blockSize)
   627  	t5 := t0.Add(5 * blockSize)
   628  	t6 := t0.Add(6 * blockSize)
   629  	t7 := t0.Add(7 * blockSize)
   630  	// Mark t0-t6 (not t7) as having been warm flushed. Cold flushes can only
   631  	// happen after a successful warm flush because warm flushes currently don't
   632  	// have merging logic. This means that all blocks except t7 should
   633  	// successfully cold flush.
   634  	shard.markWarmDataFlushStateSuccess(t0)
   635  	shard.markWarmDataFlushStateSuccess(t1)
   636  	shard.markWarmDataFlushStateSuccess(t2)
   637  	shard.markWarmDataFlushStateSuccess(t3)
   638  	shard.markWarmDataFlushStateSuccess(t4)
   639  	shard.markWarmDataFlushStateSuccess(t5)
   640  	shard.markWarmDataFlushStateSuccess(t6)
   641  
   642  	dirtyData := []testDirtySeries{
   643  		{id: ident.StringID("id0"), dirtyTimes: []xtime.UnixNano{t0, t2, t3, t4}},
   644  		{id: ident.StringID("id1"), dirtyTimes: []xtime.UnixNano{t1}},
   645  		{id: ident.StringID("id2"), dirtyTimes: []xtime.UnixNano{t3, t4, t5}},
   646  		{id: ident.StringID("id3"), dirtyTimes: []xtime.UnixNano{t6, t7}},
   647  	}
   648  	for _, ds := range dirtyData {
   649  		curr := series.NewMockDatabaseSeries(ctrl)
   650  		curr.EXPECT().ID().Return(ds.id).AnyTimes()
   651  		curr.EXPECT().Metadata().Return(doc.Metadata{ID: ds.id.Bytes()}).AnyTimes()
   652  		curr.EXPECT().ColdFlushBlockStarts(gomock.Any()).
   653  			Return(optimizedTimesFromTimes(ds.dirtyTimes))
   654  		shard.list.PushBack(NewEntry(NewEntryOptions{
   655  			Series: curr,
   656  		}))
   657  	}
   658  
   659  	preparer := persist.NewMockFlushPreparer(ctrl)
   660  	fsReader := fs.NewMockDataFileSetReader(ctrl)
   661  	resources := coldFlushReusableResources{
   662  		dirtySeries:        newDirtySeriesMap(),
   663  		dirtySeriesToWrite: make(map[xtime.UnixNano]*idList),
   664  		idElementPool:      newIDElementPool(nil),
   665  		fsReader:           fsReader,
   666  	}
   667  
   668  	// Assert that flush state cold versions all start at 0.
   669  	for i := t0; i.Before(t7.Add(blockSize)); i = i.Add(blockSize) {
   670  		coldVersion, err := shard.RetrievableBlockColdVersion(i)
   671  		require.NoError(t, err)
   672  		require.Equal(t, 0, coldVersion)
   673  	}
   674  	shardColdFlush, err := shard.ColdFlush(preparer, resources, nsCtx, &persist.NoOpColdFlushNamespace{})
   675  	require.NoError(t, err)
   676  	require.NoError(t, shardColdFlush.Done())
   677  	// After a cold flush, t0-t6 previously dirty block starts should be updated
   678  	// to version 1.
   679  	for i := t0; i.Before(t6.Add(blockSize)); i = i.Add(blockSize) {
   680  		coldVersion, err := shard.RetrievableBlockColdVersion(i)
   681  		require.NoError(t, err)
   682  		require.Equal(t, 1, coldVersion)
   683  	}
   684  	// t7 shouldn't be cold flushed because it hasn't been warm flushed.
   685  	coldVersion, err := shard.RetrievableBlockColdVersion(t7)
   686  	require.NoError(t, err)
   687  	require.Equal(t, 0, coldVersion)
   688  }
   689  
   690  func TestShardColdFlushNoMergeIfNothingDirty(t *testing.T) {
   691  	ctrl := xtest.NewController(t)
   692  	defer ctrl.Finish()
   693  	now := xtime.Now()
   694  	nowFn := func() time.Time {
   695  		return now.ToTime()
   696  	}
   697  	opts := DefaultTestOptions()
   698  	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(nowFn))
   699  	blockSize := opts.SeriesOptions().RetentionOptions().BlockSize()
   700  	shard := testDatabaseShard(t, opts)
   701  
   702  	ctx := context.NewBackground()
   703  	defer ctx.Close()
   704  
   705  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   706  	require.NoError(t, shard.Bootstrap(ctx, nsCtx))
   707  
   708  	shard.newMergerFn = newMergerTestFn
   709  	shard.newFSMergeWithMemFn = newFSMergeWithMemTestFn
   710  
   711  	t0 := now.Truncate(blockSize).Add(-10 * blockSize)
   712  	t1 := t0.Add(1 * blockSize)
   713  	t2 := t0.Add(2 * blockSize)
   714  	t3 := t0.Add(3 * blockSize)
   715  	shard.markWarmDataFlushStateSuccess(t0)
   716  	shard.markWarmDataFlushStateSuccess(t1)
   717  	shard.markWarmDataFlushStateSuccess(t2)
   718  	shard.markWarmDataFlushStateSuccess(t3)
   719  
   720  	preparer := persist.NewMockFlushPreparer(ctrl)
   721  	fsReader := fs.NewMockDataFileSetReader(ctrl)
   722  	idElementPool := newIDElementPool(nil)
   723  
   724  	// Pretend that dirtySeriesToWrite has been used previously, leaving
   725  	// behind empty idLists at some block starts. This is desired behavior since
   726  	// we don't want to reallocate new idLists for the same block starts when we
   727  	// process a different shard.
   728  	dirtySeriesToWrite := make(map[xtime.UnixNano]*idList)
   729  	dirtySeriesToWrite[t0] = newIDList(idElementPool)
   730  	dirtySeriesToWrite[t1] = newIDList(idElementPool)
   731  	dirtySeriesToWrite[t2] = newIDList(idElementPool)
   732  	dirtySeriesToWrite[t3] = newIDList(idElementPool)
   733  
   734  	resources := coldFlushReusableResources{
   735  		dirtySeries:        newDirtySeriesMap(),
   736  		dirtySeriesToWrite: dirtySeriesToWrite,
   737  		idElementPool:      idElementPool,
   738  		fsReader:           fsReader,
   739  	}
   740  
   741  	shardColdFlush, err := shard.ColdFlush(preparer, resources, nsCtx, &persist.NoOpColdFlushNamespace{})
   742  	require.NoError(t, err)
   743  	require.NoError(t, shardColdFlush.Done())
   744  	// After a cold flush, t0-t3 should remain version 0, since nothing should
   745  	// actually be merged.
   746  	for i := t0; i.Before(t3.Add(blockSize)); i = i.Add(blockSize) {
   747  		coldVersion, err := shard.RetrievableBlockColdVersion(i)
   748  		require.NoError(t, err)
   749  		assert.Equal(t, 0, coldVersion)
   750  	}
   751  }
   752  
   753  func newMergerTestFn(
   754  	_ fs.DataFileSetReader,
   755  	_ int,
   756  	_ xio.SegmentReaderPool,
   757  	_ encoding.MultiReaderIteratorPool,
   758  	_ ident.Pool,
   759  	_ encoding.EncoderPool,
   760  	_ context.Pool,
   761  	_ string,
   762  	_ namespace.Options,
   763  ) fs.Merger {
   764  	return &noopMerger{}
   765  }
   766  
   767  type noopMerger struct{}
   768  
   769  func (m *noopMerger) Merge(
   770  	_ fs.FileSetFileIdentifier,
   771  	_ fs.MergeWith,
   772  	_ int,
   773  	_ persist.FlushPreparer,
   774  	_ namespace.Context,
   775  	_ persist.OnFlushSeries,
   776  ) (persist.DataCloser, error) {
   777  	closer := func() error { return nil }
   778  	return closer, nil
   779  }
   780  
   781  func (m *noopMerger) MergeAndCleanup(
   782  	_ fs.FileSetFileIdentifier,
   783  	_ fs.MergeWith,
   784  	_ int,
   785  	_ persist.FlushPreparer,
   786  	_ namespace.Context,
   787  	_ persist.OnFlushSeries,
   788  	_ bool,
   789  ) error {
   790  	return nil
   791  }
   792  
   793  func newFSMergeWithMemTestFn(
   794  	_ databaseShard,
   795  	_ series.QueryableBlockRetriever,
   796  	_ *dirtySeriesMap,
   797  	_ map[xtime.UnixNano]*idList,
   798  ) fs.MergeWith {
   799  	return fs.NewNoopMergeWith()
   800  }
   801  
   802  func TestShardSnapshotShardNotBootstrapped(t *testing.T) {
   803  	ctrl := xtest.NewController(t)
   804  	defer ctrl.Finish()
   805  
   806  	blockStart := xtime.FromSeconds(21600)
   807  
   808  	s := testDatabaseShard(t, DefaultTestOptions())
   809  	defer s.Close()
   810  	s.bootstrapState = Bootstrapping
   811  
   812  	snapshotPreparer := persist.NewMockSnapshotPreparer(ctrl)
   813  	_, err := s.Snapshot(blockStart, blockStart, snapshotPreparer, namespace.Context{})
   814  	require.Equal(t, errShardNotBootstrappedToSnapshot, err)
   815  }
   816  
   817  func TestShardSnapshotSeriesSnapshotSuccess(t *testing.T) {
   818  	ctrl := xtest.NewController(t)
   819  	defer ctrl.Finish()
   820  
   821  	blockStart := xtime.FromSeconds(21600)
   822  
   823  	s := testDatabaseShard(t, DefaultTestOptions())
   824  	defer s.Close()
   825  	s.bootstrapState = Bootstrapped
   826  
   827  	var closed bool
   828  	snapshotPreparer := persist.NewMockSnapshotPreparer(ctrl)
   829  	prepared := persist.PreparedDataPersist{
   830  		Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil },
   831  		Close:   func() error { closed = true; return nil },
   832  	}
   833  
   834  	prepareOpts := xtest.CmpMatcher(persist.DataPrepareOptions{
   835  		NamespaceMetadata: s.namespace,
   836  		Shard:             s.shard,
   837  		BlockStart:        blockStart,
   838  		FileSetType:       persist.FileSetSnapshotType,
   839  		Snapshot: persist.DataPrepareSnapshotOptions{
   840  			SnapshotTime: blockStart,
   841  		},
   842  	})
   843  	snapshotPreparer.EXPECT().PrepareData(prepareOpts).Return(prepared, nil)
   844  
   845  	snapshotted := make(map[int]struct{})
   846  	for i := 0; i < 2; i++ {
   847  		i := i
   848  		entry := series.NewMockDatabaseSeries(ctrl)
   849  		entry.EXPECT().ID().Return(ident.StringID("foo" + strconv.Itoa(i))).AnyTimes()
   850  		entry.EXPECT().IsEmpty().Return(false).AnyTimes()
   851  		entry.EXPECT().MarkNonEmptyBlocks(blockStart).
   852  			DoAndReturn(func(nonEmptyBlockStarts map[xtime.UnixNano]struct{}) {
   853  				nonEmptyBlockStarts[blockStart] = struct{}{}
   854  			}).AnyTimes()
   855  		entry.EXPECT().
   856  			Snapshot(gomock.Any(), blockStart, gomock.Any(), gomock.Any()).
   857  			Do(func(context.Context, xtime.UnixNano, persist.DataFn, namespace.Context) {
   858  				snapshotted[i] = struct{}{}
   859  			}).
   860  			Return(series.SnapshotResult{}, nil)
   861  		s.list.PushBack(NewEntry(NewEntryOptions{
   862  			Series: entry,
   863  		}))
   864  	}
   865  
   866  	_, err := s.Snapshot(blockStart, blockStart, snapshotPreparer, namespace.Context{})
    867  	require.Equal(t, 2, len(snapshotted))
   868  	for i := 0; i < 2; i++ {
   869  		_, ok := snapshotted[i]
   870  		require.True(t, ok)
   871  	}
   872  
   873  	require.True(t, closed)
    874  	require.NoError(t, err)
   875  }
   876  
   877  func addMockTestSeries(ctrl *gomock.Controller, shard *dbShard, id ident.ID) *series.MockDatabaseSeries {
    878  	mockSeries := series.NewMockDatabaseSeries(ctrl)
    879  	mockSeries.EXPECT().ID().AnyTimes().Return(id)
    880  	shard.Lock()
    881  	shard.insertNewShardEntryWithLock(NewEntry(NewEntryOptions{
    882  		Series: mockSeries,
    883  	}))
    884  	shard.Unlock()
    885  	return mockSeries
   886  }
   887  
   888  func addTestSeries(shard *dbShard, id ident.ID) series.DatabaseSeries {
   889  	return addTestSeriesWithCount(shard, id, 0)
   890  }
   891  
   892  func addTestSeriesWithCount(shard *dbShard, id ident.ID, count int32) series.DatabaseSeries {
   893  	seriesEntry := series.NewDatabaseSeries(series.DatabaseSeriesOptions{
   894  		ID:          id,
   895  		UniqueIndex: 1,
   896  		Options:     shard.seriesOpts,
   897  	})
   898  	shard.Lock()
   899  	entry := NewEntry(NewEntryOptions{
   900  		Series: seriesEntry,
   901  	})
   902  	for i := int32(0); i < count; i++ {
   903  		entry.IncrementReaderWriterCount()
   904  	}
   905  	shard.insertNewShardEntryWithLock(entry)
   906  	shard.Unlock()
   907  	return seriesEntry
   908  }
   909  
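         // writeShardAndVerify writes a single datapoint to the shard and asserts
         // the resulting series metadata and whether the write actually happened.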
   910  func writeShardAndVerify(
   911  	ctx context.Context,
   912  	t *testing.T,
   913  	shard *dbShard,
   914  	id string,
   915  	now xtime.UnixNano,
   916  	value float64,
   917  	expectedShouldWrite bool,
   918  	expectedIdx uint64,
   919  ) {
   920  	seriesWrite, err := shard.Write(ctx, ident.StringID(id),
   921  		now, value, xtime.Second, nil, series.WriteOptions{})
   922  	assert.NoError(t, err)
   923  	assert.Equal(t, expectedShouldWrite, seriesWrite.WasWritten)
   924  	assert.Equal(t, id, seriesWrite.Series.ID.String())
   925  	assert.Equal(t, "testns1", seriesWrite.Series.Namespace.String())
   926  	assert.Equal(t, expectedIdx, seriesWrite.Series.UniqueIndex)
   927  }
   928  
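         // TestShardTick verifies that a tick visits every active series, sleeps
         // between series batches, and expires flush states that fall outside the
         // retention window.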
   929  func TestShardTick(t *testing.T) {
   930  	dir, err := ioutil.TempDir("", "testdir")
   931  	require.NoError(t, err)
   932  	defer os.RemoveAll(dir)
   933  
   934  	ctrl := xtest.NewController(t)
   935  	defer ctrl.Finish()
   936  
   937  	now := xtime.Now()
   938  	nowLock := sync.RWMutex{}
   939  	nowFn := func() xtime.UnixNano {
   940  		nowLock.RLock()
   941  		value := now
   942  		nowLock.RUnlock()
   943  		return value
   944  	}
   945  	clockFn := func() time.Time {
   946  		nowLock.RLock()
   947  		value := now
   948  		nowLock.RUnlock()
   949  		return value.ToTime()
   950  	}
   951  	setNow := func(t xtime.UnixNano) {
   952  		nowLock.Lock()
   953  		now = t
   954  		nowLock.Unlock()
   955  	}
   956  
   957  	opts := DefaultTestOptions()
   958  	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(clockFn))
   959  
   960  	fsOpts := opts.CommitLogOptions().FilesystemOptions().
   961  		SetFilePathPrefix(dir)
   962  	opts = opts.
   963  		SetCommitLogOptions(opts.CommitLogOptions().
   964  			SetFilesystemOptions(fsOpts))
   965  
   966  	earliestFlush := retention.FlushTimeStart(defaultTestRetentionOpts, now)
   967  	beforeEarliestFlush := earliestFlush.Add(-defaultTestRetentionOpts.BlockSize())
   968  
   969  	sleepPerSeries := time.Microsecond
   970  
   971  	ctx := context.NewBackground()
   972  	defer ctx.Close()
   973  
   974  	shard := testDatabaseShard(t, opts)
   975  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
    976  	require.NoError(t, shard.Bootstrap(ctx, nsCtx))
   977  	shard.SetRuntimeOptions(runtime.NewOptions().
   978  		SetTickPerSeriesSleepDuration(sleepPerSeries).
   979  		SetTickSeriesBatchSize(1))
   980  	retriever := series.NewMockQueryableBlockRetriever(ctrl)
   981  	retriever.EXPECT().IsBlockRetrievable(gomock.Any()).Return(false, nil).AnyTimes()
   982  	shard.seriesBlockRetriever = retriever
   983  	defer shard.Close()
   984  
   985  	// Also check that it expires flush states by time
   986  	shard.flushState.statesByTime[earliestFlush] = fileOpState{
   987  		WarmStatus: warmStatus{
   988  			DataFlushed: fileOpSuccess,
   989  		},
   990  	}
   991  	shard.flushState.statesByTime[beforeEarliestFlush] = fileOpState{
   992  		WarmStatus: warmStatus{
   993  			DataFlushed: fileOpSuccess,
   994  		},
   995  	}
   996  	assert.Equal(t, 2, len(shard.flushState.statesByTime))
   997  
   998  	var slept time.Duration
   999  	shard.sleepFn = func(t time.Duration) {
  1000  		slept += t
  1001  		setNow(nowFn().Add(t))
  1002  	}
  1003  
  1004  	writeShardAndVerify(ctx, t, shard, "foo", nowFn(), 1.0, true, 0)
  1005  	// same time, different value should write
  1006  	writeShardAndVerify(ctx, t, shard, "foo", nowFn(), 2.0, true, 0)
  1007  
  1008  	writeShardAndVerify(ctx, t, shard, "bar", nowFn(), 2.0, true, 1)
   1009  	// same time, same value should not write
  1010  	writeShardAndVerify(ctx, t, shard, "bar", nowFn(), 2.0, false, 1)
  1011  
  1012  	writeShardAndVerify(ctx, t, shard, "baz", nowFn(), 3.0, true, 2)
  1013  	// different time, same value should write
  1014  	writeShardAndVerify(ctx, t, shard, "baz", nowFn().Add(1), 3.0, true, 2)
  1015  
  1016  	// same time, same value should not write, regardless of being out of order
  1017  	writeShardAndVerify(ctx, t, shard, "foo", nowFn(), 2.0, false, 0)
  1018  
  1019  	r, err := shard.Tick(context.NewNoOpCanncellable(), nowFn(), namespace.Context{})
  1020  	require.NoError(t, err)
  1021  	require.Equal(t, 3, r.activeSeries)
  1022  	require.Equal(t, 0, r.expiredSeries)
  1023  	require.Equal(t, 2*sleepPerSeries, slept) // Never sleeps on the first series
  1024  
  1025  	// Ensure flush states by time was expired correctly
  1026  	require.Equal(t, 1, len(shard.flushState.statesByTime))
  1027  	_, ok := shard.flushState.statesByTime[earliestFlush]
  1028  	require.True(t, ok)
  1029  }
  1030  
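         // testWrite describes a single datapoint written during the async write
         // tests below.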
  1031  type testWrite struct {
  1032  	id         string
  1033  	value      float64
  1034  	unit       xtime.Unit
  1035  	annotation []byte
  1036  }
  1037  
  1038  func TestShardWriteAsync(t *testing.T) {
  1039  	testShardWriteAsync(t, []testWrite{
  1040  		{
  1041  			id:    "foo",
  1042  			value: 1.0,
  1043  			unit:  xtime.Second,
  1044  		},
  1045  		{
  1046  			id:    "bar",
  1047  			value: 2.0,
  1048  			unit:  xtime.Second,
  1049  		},
  1050  		{
  1051  			id:    "baz",
  1052  			value: 3.0,
  1053  			unit:  xtime.Second,
  1054  		},
  1055  	})
  1056  }
  1057  
  1058  func TestShardWriteAsyncWithAnnotations(t *testing.T) {
  1059  	testShardWriteAsync(t, []testWrite{
  1060  		{
  1061  			id:         "foo",
  1062  			value:      1.0,
  1063  			unit:       xtime.Second,
  1064  			annotation: []byte("annotation1"),
  1065  		},
  1066  		{
  1067  			id:         "bar",
  1068  			value:      2.0,
  1069  			unit:       xtime.Second,
  1070  			annotation: []byte("annotation2"),
  1071  		},
  1072  		{
  1073  			id:         "baz",
  1074  			value:      3.0,
  1075  			unit:       xtime.Second,
  1076  			annotation: []byte("annotation3"),
  1077  		},
  1078  	})
  1079  }
  1080  
  1081  func testShardWriteAsync(t *testing.T, writes []testWrite) {
  1082  	dir, err := ioutil.TempDir("", "testdir")
  1083  	require.NoError(t, err)
  1084  	defer os.RemoveAll(dir)
  1085  
  1086  	ctrl := xtest.NewController(t)
  1087  	defer ctrl.Finish()
  1088  
  1089  	testReporter := xmetrics.NewTestStatsReporter(xmetrics.NewTestStatsReporterOptions())
  1090  	scope, closer := tally.NewRootScope(tally.ScopeOptions{
  1091  		Reporter: testReporter,
  1092  	}, 100*time.Millisecond)
  1093  	defer closer.Close()
  1094  
  1095  	now := xtime.Now()
  1096  	nowLock := sync.RWMutex{}
  1097  	nowFn := func() time.Time {
  1098  		nowLock.RLock()
  1099  		value := now
  1100  		nowLock.RUnlock()
  1101  		return value.ToTime()
  1102  	}
  1103  	setNow := func(t time.Time) {
  1104  		nowLock.Lock()
  1105  		now = xtime.ToUnixNano(t)
  1106  		nowLock.Unlock()
  1107  	}
  1108  
  1109  	mockBytesPool := pool.NewMockCheckedBytesPool(ctrl)
  1110  	for _, write := range writes {
  1111  		if write.annotation != nil {
  1112  			mockBytes := checked.NewMockBytes(ctrl)
  1113  			mockBytes.EXPECT().IncRef()
  1114  			mockBytes.EXPECT().AppendAll(write.annotation)
  1115  			mockBytes.EXPECT().Bytes()
  1116  			mockBytes.EXPECT().DecRef()
  1117  			mockBytes.EXPECT().Finalize()
  1118  
  1119  			mockBytesPool.
  1120  				EXPECT().
  1121  				Get(gomock.Any()).
  1122  				Return(mockBytes)
  1123  		}
  1124  	}
  1125  
  1126  	opts := DefaultTestOptions().
  1127  		SetBytesPool(mockBytesPool)
  1128  	fsOpts := opts.CommitLogOptions().FilesystemOptions().
  1129  		SetFilePathPrefix(dir)
  1130  	opts = opts.
  1131  		SetInstrumentOptions(
  1132  			opts.InstrumentOptions().
  1133  				SetMetricsScope(scope).
  1134  				SetReportInterval(100 * time.Millisecond)).
  1135  		SetClockOptions(
  1136  			opts.ClockOptions().SetNowFn(nowFn)).
  1137  		SetCommitLogOptions(opts.CommitLogOptions().
  1138  			SetFilesystemOptions(fsOpts))
  1139  
  1140  	earliestFlush := retention.FlushTimeStart(defaultTestRetentionOpts, now)
  1141  	beforeEarliestFlush := earliestFlush.Add(-defaultTestRetentionOpts.BlockSize())
  1142  
  1143  	sleepPerSeries := time.Microsecond
  1144  
  1145  	ctx := context.NewBackground()
  1146  	defer ctx.Close()
  1147  
  1148  	shard := testDatabaseShard(t, opts)
  1149  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   1150  	require.NoError(t, shard.Bootstrap(ctx, nsCtx))
  1151  	shard.SetRuntimeOptions(runtime.NewOptions().
  1152  		SetWriteNewSeriesAsync(true).
  1153  		SetTickPerSeriesSleepDuration(sleepPerSeries).
  1154  		SetTickSeriesBatchSize(1))
  1155  	retriever := series.NewMockQueryableBlockRetriever(ctrl)
  1156  	retriever.EXPECT().IsBlockRetrievable(gomock.Any()).Return(false, nil).AnyTimes()
  1157  	shard.seriesBlockRetriever = retriever
  1158  	defer shard.Close()
  1159  
  1160  	// Also check that it expires flush states by time
  1161  	shard.flushState.statesByTime[earliestFlush] = fileOpState{
  1162  		WarmStatus: warmStatus{
  1163  			DataFlushed: fileOpSuccess,
  1164  		},
  1165  	}
  1166  	shard.flushState.statesByTime[beforeEarliestFlush] = fileOpState{
  1167  		WarmStatus: warmStatus{
  1168  			DataFlushed: fileOpSuccess,
  1169  		},
  1170  	}
  1171  	assert.Equal(t, 2, len(shard.flushState.statesByTime))
  1172  
  1173  	var slept time.Duration
  1174  	shard.sleepFn = func(t time.Duration) {
  1175  		slept += t
  1176  		setNow(nowFn().Add(t))
  1177  	}
  1178  
  1179  	for _, write := range writes {
  1180  		_, err := shard.Write(ctx, ident.StringID(write.id), xtime.ToUnixNano(nowFn()),
  1181  			write.value, write.unit, write.annotation, series.WriteOptions{})
  1182  		require.NoError(t, err)
  1183  	}
  1184  
  1185  	for {
  1186  		counter, ok := testReporter.Counters()["dbshard.insert-queue.inserts"]
  1187  		if ok && counter == int64(len(writes)) {
  1188  			break
  1189  		}
  1190  		time.Sleep(10 * time.Millisecond)
  1191  	}
  1192  
  1193  	r, err := shard.Tick(context.NewNoOpCanncellable(), xtime.ToUnixNano(nowFn()), namespace.Context{})
  1194  	require.NoError(t, err)
  1195  	require.Equal(t, len(writes), r.activeSeries)
  1196  	require.Equal(t, 0, r.expiredSeries)
  1197  	require.Equal(t, 2*sleepPerSeries, slept) // Never sleeps on the first series
  1198  
  1199  	// Ensure flush states by time was expired correctly
  1200  	require.Equal(t, 1, len(shard.flushState.statesByTime))
  1201  	_, ok := shard.flushState.statesByTime[earliestFlush]
  1202  	require.True(t, ok)
  1203  
  1204  	// Verify the documents in the shard's series are present.
  1205  	for _, w := range writes {
  1206  		doc, exists, err := shard.DocRef(ident.StringID(w.id))
  1207  		require.NoError(t, err)
  1208  		require.True(t, exists)
  1209  		require.Equal(t, w.id, string(doc.ID))
  1210  	}
  1211  	document, exists, err := shard.DocRef(ident.StringID("NOT_PRESENT_ID"))
  1212  	require.NoError(t, err)
  1213  	require.False(t, exists)
  1214  	require.Equal(t, doc.Metadata{}, document)
  1215  }
  1216  
  1217  // This tests a race in shard ticking with an empty series pending expiration.
  1218  func TestShardTickRace(t *testing.T) {
  1219  	opts := DefaultTestOptions()
  1220  	shard := testDatabaseShard(t, opts)
  1221  	defer shard.Close()
  1222  
  1223  	ctx := context.NewBackground()
  1224  	defer ctx.Close()
  1225  
  1226  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   1227  	require.NoError(t, shard.Bootstrap(ctx, nsCtx))
  1228  
  1229  	addTestSeries(shard, ident.StringID("foo"))
  1230  
  1231  	var wg sync.WaitGroup
  1232  
  1233  	wg.Add(2)
  1234  	go func() {
  1235  		shard.Tick(context.NewNoOpCanncellable(), xtime.Now(), namespace.Context{}) // nolint
  1236  		wg.Done()
  1237  	}()
  1238  
  1239  	go func() {
  1240  		shard.Tick(context.NewNoOpCanncellable(), xtime.Now(), namespace.Context{}) // nolint
  1241  		wg.Done()
  1242  	}()
  1243  
  1244  	wg.Wait()
  1245  
  1246  	shard.RLock()
  1247  	shardlen := shard.lookup.Len()
  1248  	shard.RUnlock()
  1249  
  1250  	require.Equal(t, 0, shardlen)
  1251  }
  1252  
   1253  // Catches a logic bug we had in purgeSeries where the reference held while
   1254  // purging was counted as a concurrent read.
  1255  func TestShardTickCleanupSmallBatchSize(t *testing.T) {
  1256  	opts := DefaultTestOptions()
  1257  
  1258  	ctx := context.NewBackground()
  1259  	defer ctx.Close()
  1260  
  1261  	shard := testDatabaseShard(t, opts)
  1262  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   1263  	require.NoError(t, shard.Bootstrap(ctx, nsCtx))
  1264  
  1265  	addTestSeries(shard, ident.StringID("foo"))
  1266  	_, err := shard.Tick(context.NewNoOpCanncellable(), xtime.Now(), namespace.Context{})
  1267  	require.NoError(t, err)
  1268  	require.Equal(t, 0, shard.lookup.Len())
  1269  }
  1270  
   1271  // This test ensures the shard returns an error if two ticks are triggered concurrently.
  1272  func TestShardReturnsErrorForConcurrentTicks(t *testing.T) {
  1273  	dir, err := ioutil.TempDir("", "testdir")
  1274  	require.NoError(t, err)
  1275  	defer os.RemoveAll(dir)
  1276  
  1277  	ctrl := xtest.NewController(t)
  1278  	defer ctrl.Finish()
  1279  
  1280  	opts := DefaultTestOptions()
  1281  	fsOpts := opts.CommitLogOptions().FilesystemOptions().
  1282  		SetFilePathPrefix(dir)
  1283  	opts = opts.
  1284  		SetCommitLogOptions(opts.CommitLogOptions().
  1285  			SetFilesystemOptions(fsOpts))
  1286  
  1287  	ctx := context.NewBackground()
  1288  	defer ctx.Close()
  1289  
  1290  	shard := testDatabaseShard(t, opts)
  1291  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
   1292  	require.NoError(t, shard.Bootstrap(ctx, nsCtx))
  1293  	shard.currRuntimeOptions.tickSleepSeriesBatchSize = 1
  1294  	shard.currRuntimeOptions.tickSleepPerSeries = time.Millisecond
  1295  
  1296  	var (
  1297  		foo     = addMockTestSeries(ctrl, shard, ident.StringID("foo"))
  1298  		tick1Wg sync.WaitGroup
  1299  		tick2Wg sync.WaitGroup
  1300  		closeWg sync.WaitGroup
  1301  	)
  1302  
  1303  	tick1Wg.Add(1)
  1304  	tick2Wg.Add(1)
  1305  	closeWg.Add(2)
  1306  
   1307  	// Wait to return until the other tick has returned an error.
  1308  	foo.EXPECT().Tick(gomock.Any(), gomock.Any()).Do(func(interface{}, interface{}) {
  1309  		tick1Wg.Done()
  1310  		tick2Wg.Wait()
  1311  	}).Return(series.TickResult{}, nil)
  1312  
  1313  	go func() {
  1314  		_, err := shard.Tick(context.NewNoOpCanncellable(), xtime.Now(), namespace.Context{})
  1315  		if err != nil {
  1316  			panic(err)
  1317  		}
  1318  		closeWg.Done()
  1319  	}()
  1320  
  1321  	go func() {
  1322  		tick1Wg.Wait()
  1323  		_, err := shard.Tick(context.NewNoOpCanncellable(), xtime.Now(), namespace.Context{})
  1324  		require.Error(t, err)
  1325  		tick2Wg.Done()
  1326  		closeWg.Done()
  1327  	}()
  1328  
  1329  	closeWg.Wait()
  1330  }
  1331  
   1332  // This test ensures the resources held by series contained in the shard are
   1333  // released when closing the shard.
  1334  func TestShardTicksWhenClosed(t *testing.T) {
  1335  	ctrl := xtest.NewController(t)
  1336  	defer ctrl.Finish()
  1337  
  1338  	opts := DefaultTestOptions()
  1339  	shard := testDatabaseShard(t, opts)
  1340  	s := addMockTestSeries(ctrl, shard, ident.StringID("foo"))
  1341  
  1342  	gomock.InOrder(
  1343  		s.EXPECT().IsEmpty().Return(true),
  1344  		s.EXPECT().Close(),
  1345  	)
  1346  	require.NoError(t, shard.Close())
  1347  }
  1348  
   1349  // This test ensures the shard terminates ticks when closing.
  1350  func TestShardTicksStopWhenClosing(t *testing.T) {
  1351  	ctrl := xtest.NewController(t)
  1352  	defer ctrl.Finish()
  1353  
  1354  	opts := DefaultTestOptions()
  1355  	shard := testDatabaseShard(t, opts)
  1356  	shard.currRuntimeOptions.tickSleepSeriesBatchSize = 1
  1357  	shard.currRuntimeOptions.tickSleepPerSeries = time.Millisecond
  1358  
  1359  	var (
  1360  		foo     = addMockTestSeries(ctrl, shard, ident.StringID("foo"))
  1361  		bar     = addMockTestSeries(ctrl, shard, ident.StringID("bar"))
  1362  		closeWg sync.WaitGroup
  1363  		orderWg sync.WaitGroup
  1364  	)
  1365  
  1366  	orderWg.Add(1)
  1367  	gomock.InOrder(
  1368  		// loop until the shard is marked for Closing
  1369  		foo.EXPECT().Tick(gomock.Any(), gomock.Any()).Do(func(interface{}, interface{}) {
  1370  			orderWg.Done()
  1371  			for {
  1372  				if shard.isClosing() {
  1373  					break
  1374  				}
  1375  				time.Sleep(10 * time.Millisecond)
  1376  			}
  1377  		}).Return(series.TickResult{}, nil),
  1378  		// for the shard Close purging
  1379  		foo.EXPECT().IsEmpty().Return(true),
  1380  		foo.EXPECT().Close(),
  1381  		bar.EXPECT().IsEmpty().Return(true),
  1382  		bar.EXPECT().Close(),
  1383  	)
  1384  
  1385  	closeWg.Add(2)
  1386  	go func() {
  1387  		shard.Tick(context.NewNoOpCanncellable(), xtime.Now(), namespace.Context{}) // nolint
  1388  		closeWg.Done()
  1389  	}()
  1390  
  1391  	go func() {
  1392  		orderWg.Wait()
  1393  		require.NoError(t, shard.Close())
  1394  		closeWg.Done()
  1395  	}()
  1396  
  1397  	closeWg.Wait()
  1398  }
  1399  
  1400  // This tests the scenario where an empty series is expired.
  1401  func TestPurgeExpiredSeriesEmptySeries(t *testing.T) {
  1402  	opts := DefaultTestOptions()
  1403  	shard := testDatabaseShard(t, opts)
  1404  	defer shard.Close()
  1405  
  1406  	addTestSeries(shard, ident.StringID("foo"))
  1407  
  1408  	_, err := shard.Tick(context.NewNoOpCanncellable(), xtime.Now(), namespace.Context{})
  1409  	require.NoError(t, err)
  1410  
  1411  	shard.RLock()
  1412  	require.Equal(t, 0, shard.lookup.Len())
  1413  	shard.RUnlock()
  1414  }
  1415  
  1416  // This tests the scenario where a non-empty series is not expired.
  1417  func TestPurgeExpiredSeriesNonEmptySeries(t *testing.T) {
  1418  	ctrl := xtest.NewController(t)
  1419  	defer ctrl.Finish()
  1420  
  1421  	opts := DefaultTestOptions()
  1422  	shard := testDatabaseShard(t, opts)
  1423  	retriever := series.NewMockQueryableBlockRetriever(ctrl)
  1424  	retriever.EXPECT().IsBlockRetrievable(gomock.Any()).Return(false, nil).AnyTimes()
  1425  	shard.seriesBlockRetriever = retriever
  1426  	defer shard.Close()
  1427  	ctx := opts.ContextPool().Get()
  1428  	nowFn := opts.ClockOptions().NowFn()
  1429  	_, err := shard.Write(ctx, ident.StringID("foo"), xtime.ToUnixNano(nowFn()),
  1430  		1.0, xtime.Second, nil, series.WriteOptions{})
  1431  	require.NoError(t, err)
  1432  
  1433  	r, err := shard.tickAndExpire(context.NewNoOpCanncellable(), tickPolicyRegular, namespace.Context{})
  1434  	require.NoError(t, err)
  1435  	require.Equal(t, 1, r.activeSeries)
  1436  	require.Equal(t, 0, r.expiredSeries)
  1437  }
  1438  
  1439  // This tests the scenario where a series is empty when series.Tick() is called,
  1440  // but receives writes after tickForEachSeries finishes but before purgeExpiredSeries
  1441  // starts. The expected behavior is not to expire series in this case.
  1442  func TestPurgeExpiredSeriesWriteAfterTicking(t *testing.T) {
  1443  	ctrl := xtest.NewController(t)
  1444  	defer ctrl.Finish()
  1445  
  1446  	opts := DefaultTestOptions()
  1447  	shard := testDatabaseShard(t, opts)
  1448  	defer shard.Close()
  1449  	id := ident.StringID("foo")
  1450  	s := addMockSeries(ctrl, shard, id, ident.Tags{}, 0)
  1451  	s.EXPECT().Tick(gomock.Any(), gomock.Any()).Do(func(interface{}, interface{}) {
  1452  		// Emulate a write taking place just after tick for this series
  1453  		s.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any(),
  1454  			gomock.Any(), gomock.Any(), gomock.Any()).Return(true, series.WarmWrite, nil)
  1455  
  1456  		ctx := opts.ContextPool().Get()
  1457  		nowFn := opts.ClockOptions().NowFn()
  1458  		_, err := shard.Write(
  1459  			ctx, id, xtime.ToUnixNano(nowFn()), 1.0, xtime.Second,
  1460  			nil, series.WriteOptions{},
  1461  		)
  1462  		require.NoError(t, err)
  1463  	}).Return(series.TickResult{}, series.ErrSeriesAllDatapointsExpired)
  1464  
  1465  	r, err := shard.tickAndExpire(context.NewNoOpCanncellable(), tickPolicyRegular, namespace.Context{})
  1466  	require.NoError(t, err)
  1467  	require.Equal(t, 0, r.activeSeries)
  1468  	require.Equal(t, 1, r.expiredSeries)
  1469  	require.Equal(t, 1, shard.lookup.Len())
  1470  }
  1471  
  1472  // This tests the scenario where tickForEachSeries finishes, and before purgeExpiredSeries
  1473  // starts, we receive a write for a series, then purgeExpiredSeries runs, then we write to
  1474  // the series. The expected behavior is not to expire series in this case.
  1475  func TestPurgeExpiredSeriesWriteAfterPurging(t *testing.T) {
  1476  	ctrl := xtest.NewController(t)
  1477  	defer ctrl.Finish()
  1478  
  1479  	var entry *Entry
  1480  
  1481  	opts := DefaultTestOptions()
  1482  	shard := testDatabaseShard(t, opts)
  1483  	defer shard.Close()
  1484  	id := ident.StringID("foo")
  1485  	s := addMockSeries(ctrl, shard, id, ident.Tags{}, 0)
  1486  	s.EXPECT().Tick(gomock.Any(), gomock.Any()).Do(func(interface{}, interface{}) {
  1487  		// Emulate a write taking place and staying open just after tick for this series
  1488  		var err error
  1489  		entry, err = shard.writableSeries(id, convert.EmptyTagMetadataResolver)
  1490  		require.NoError(t, err)
  1491  	}).Return(series.TickResult{}, series.ErrSeriesAllDatapointsExpired)
  1492  
  1493  	r, err := shard.tickAndExpire(context.NewNoOpCanncellable(), tickPolicyRegular, namespace.Context{})
  1494  	require.NoError(t, err)
  1495  	require.Equal(t, 0, r.activeSeries)
  1496  	require.Equal(t, 1, r.expiredSeries)
  1497  	require.Equal(t, 1, shard.lookup.Len())
  1498  
  1499  	entry.DecrementReaderWriterCount()
  1500  	require.Equal(t, 1, shard.lookup.Len())
  1501  }
  1502  
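         // TestForEachShardEntry verifies that iteration stops once the entry fn
         // returns false and that each entry's reader/writer count is held only
         // for the duration of the callback.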
  1503  func TestForEachShardEntry(t *testing.T) {
  1504  	opts := DefaultTestOptions()
  1505  	shard := testDatabaseShard(t, opts)
  1506  	defer shard.Close()
  1507  	for i := 0; i < 10; i++ {
  1508  		addTestSeries(shard, ident.StringID(fmt.Sprintf("foo.%d", i)))
  1509  	}
  1510  
  1511  	count := 0
  1512  	entryFn := func(entry *Entry) bool {
  1513  		if entry.Series.ID().String() == "foo.8" {
  1514  			return false
  1515  		}
  1516  
  1517  		// Ensure the reader-writer count is incremented while we operate
  1518  		// on this series.
  1519  		assert.Equal(t, int32(1), entry.ReaderWriterCount())
  1520  
  1521  		count++
  1522  		return true
  1523  	}
  1524  
  1525  	shard.forEachShardEntry(entryFn)
  1526  
  1527  	assert.Equal(t, 8, count)
  1528  
  1529  	// Ensure that the reader-writer count gets reset.
  1530  	shard.RLock()
  1531  	for elem := shard.list.Front(); elem != nil; elem = elem.Next() {
  1532  		entry := elem.Value.(*Entry)
  1533  		assert.Equal(t, int32(0), entry.ReaderWriterCount())
  1534  	}
  1535  	shard.RUnlock()
  1536  }
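
        // forEachShardEntry stops iterating as soon as the callback returns false,
        // which is why only the eight entries before "foo.8" are counted above. A
        // usage sketch for visiting every entry (same callback contract as the
        // test uses; shown for illustration only):
        //
        //	total := 0
        //	shard.forEachShardEntry(func(entry *Entry) bool {
        //		total++
        //		return true // keep iterating
        //	})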
  1537  
  1538  func TestShardFetchBlocksIDNotExists(t *testing.T) {
  1539  	opts := DefaultTestOptions()
  1540  	ctx := opts.ContextPool().Get()
  1541  	defer ctx.Close()
  1542  
  1543  	shard := testDatabaseShard(t, opts)
  1544  	defer shard.Close()
  1545  	fetched, err := shard.FetchBlocks(ctx, ident.StringID("foo"), nil, namespace.Context{})
  1546  	require.NoError(t, err)
  1547  	require.Equal(t, 0, len(fetched))
  1548  }
  1549  
  1550  func TestShardFetchBlocksIDExists(t *testing.T) {
  1551  	ctrl := xtest.NewController(t)
  1552  	defer ctrl.Finish()
  1553  
  1554  	opts := DefaultTestOptions()
  1555  	ctx := opts.ContextPool().Get()
  1556  	defer ctx.Close()
  1557  
  1558  	shard := testDatabaseShard(t, opts)
  1559  	defer shard.Close()
  1560  	id := ident.StringID("foo")
  1561  	mockSeries := addMockSeries(ctrl, shard, id, ident.Tags{}, 0)
  1562  	now := xtime.Now()
  1563  	starts := []xtime.UnixNano{now}
  1564  	expected := []block.FetchBlockResult{block.NewFetchBlockResult(now, nil, nil)}
  1565  	mockSeries.EXPECT().FetchBlocks(ctx, starts, gomock.Any()).Return(expected, nil)
  1566  	res, err := shard.FetchBlocks(ctx, id, starts, namespace.Context{})
  1567  	require.NoError(t, err)
  1568  	require.Equal(t, expected, res)
  1569  }
  1570  
  1571  func TestShardCleanupExpiredFileSets(t *testing.T) {
  1572  	opts := DefaultTestOptions()
  1573  	shard := testDatabaseShard(t, opts)
  1574  	defer shard.Close()
  1575  	shard.filesetPathsBeforeFn = func(
  1576  		_ string, ns ident.ID,
  1577  		shardID uint32, _ xtime.UnixNano,
  1578  	) ([]string, error) {
  1579  		return []string{ns.String(), strconv.Itoa(int(shardID))}, nil
  1580  	}
  1581  	var deletedFiles []string
  1582  	shard.deleteFilesFn = func(files []string) error {
  1583  		deletedFiles = append(deletedFiles, files...)
  1584  		return nil
  1585  	}
  1586  	require.NoError(t, shard.CleanupExpiredFileSets(xtime.Now()))
  1587  	require.Equal(t, []string{defaultTestNs1ID.String(), "0"}, deletedFiles)
  1588  }
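
        // Note that CleanupExpiredFileSets is exercised here purely through its two
        // injected seams: filesetPathsBeforeFn reports which fileset paths fall
        // before the given time, and deleteFilesFn performs the removal, letting
        // the test assert on the paths without touching disk.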
  1589  
  1590  type testCloser struct {
  1591  	called int
  1592  }
  1593  
  1594  func (c *testCloser) Close() {
  1595  	c.called++
  1596  }
  1597  
  1598  func TestShardRegisterRuntimeOptionsListeners(t *testing.T) {
  1599  	ctrl := xtest.NewController(t)
  1600  	defer ctrl.Finish()
  1601  
  1602  	callRegisterListenerOnShard := 0
  1603  	callRegisterListenerOnShardInsertQueue := 0
  1604  
  1605  	closer := &testCloser{}
  1606  
  1607  	runtimeOptsMgr := runtime.NewMockOptionsManager(ctrl)
  1608  	runtimeOptsMgr.EXPECT().
  1609  		RegisterListener(gomock.Any()).
  1610  		Times(2).
  1611  		Do(func(l runtime.OptionsListener) {
  1612  			if _, ok := l.(*dbShard); ok {
  1613  				callRegisterListenerOnShard++
  1614  			}
  1615  			if _, ok := l.(*dbShardInsertQueue); ok {
  1616  				callRegisterListenerOnShardInsertQueue++
  1617  			}
  1618  		}).
  1619  		Return(closer)
  1620  
  1621  	opts := DefaultTestOptions().
  1622  		SetRuntimeOptionsManager(runtimeOptsMgr)
  1623  
  1624  	shard := testDatabaseShard(t, opts)
  1625  
  1626  	assert.Equal(t, 1, callRegisterListenerOnShard)
  1627  	assert.Equal(t, 1, callRegisterListenerOnShardInsertQueue)
  1628  
  1629  	assert.Equal(t, 0, closer.called)
  1630  
  1631  	shard.Close()
  1632  
  1633  	assert.Equal(t, 2, closer.called)
  1634  }
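
        // The shard registers two runtime-options listeners (the shard itself and
        // its insert queue), and both registrations return the same closer here,
        // so Close is expected to invoke that closer exactly twice, as asserted
        // above.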
  1635  
  1636  func TestShardReadEncodedCachesSeriesWithRecentlyReadPolicy(t *testing.T) {
  1637  	dir, err := ioutil.TempDir("", "testdir")
  1638  	require.NoError(t, err)
  1639  	defer os.RemoveAll(dir)
  1640  
  1641  	ctrl := xtest.NewController(t)
  1642  	defer ctrl.Finish()
  1643  
  1644  	opts := DefaultTestOptions().
  1645  		SetSeriesCachePolicy(series.CacheRecentlyRead)
  1646  	fsOpts := opts.CommitLogOptions().FilesystemOptions().
  1647  		SetFilePathPrefix(dir)
  1648  	opts = opts.
  1649  		SetCommitLogOptions(opts.CommitLogOptions().
  1650  			SetFilesystemOptions(fsOpts))
  1651  
  1652  	shard := testDatabaseShard(t, opts)
  1653  	defer shard.Close()
  1654  
  1655  	ctx := context.NewBackground()
  1656  	defer ctx.Close()
  1657  
  1658  	nsCtx := namespace.Context{ID: ident.StringID("foo")}
  1659  	require.NoError(t, shard.Bootstrap(ctx, nsCtx))
  1660  
  1661  	ropts := shard.seriesOpts.RetentionOptions()
  1662  	end := xtime.ToUnixNano(opts.ClockOptions().NowFn()()).Truncate(ropts.BlockSize())
  1663  	start := end.Add(-2 * ropts.BlockSize())
  1664  	shard.markWarmDataFlushStateSuccess(start)
  1665  	shard.markWarmDataFlushStateSuccess(start.Add(ropts.BlockSize()))
  1666  
  1667  	retriever := block.NewMockDatabaseBlockRetriever(ctrl)
  1668  	shard.setBlockRetriever(retriever)
  1669  
  1670  	segments := []ts.Segment{
  1671  		ts.NewSegment(checked.NewBytes([]byte("bar"), nil), nil, 0, ts.FinalizeNone),
  1672  		ts.NewSegment(checked.NewBytes([]byte("baz"), nil), nil, 1, ts.FinalizeNone),
  1673  	}
  1674  
  1675  	var blockReaders []xio.BlockReader
  1676  	for range segments {
  1677  		reader := xio.NewMockSegmentReader(ctrl)
  1678  		blockReader := xio.BlockReader{
  1679  			SegmentReader: reader,
  1680  		}
  1681  
  1682  		blockReaders = append(blockReaders, blockReader)
  1683  	}
  1684  
  1685  	mid := start.Add(ropts.BlockSize())
  1686  
  1687  	retriever.EXPECT().
  1688  		Stream(ctx, shard.shard, ident.NewIDMatcher("foo"),
  1689  			start, shard.seriesOnRetrieveBlock, gomock.Any()).
  1690  		Do(func(
  1691  			ctx context.Context, shard uint32, id ident.ID, at xtime.UnixNano,
  1692  			onRetrieve block.OnRetrieveBlock, nsCtx namespace.Context,
  1693  		) {
  1694  			go onRetrieve.OnRetrieveBlock(id, ident.EmptyTagIterator, at, segments[0], nsCtx)
  1695  		}).
  1696  		Return(blockReaders[0], nil)
  1697  	retriever.EXPECT().
  1698  		Stream(ctx, shard.shard, ident.NewIDMatcher("foo"),
  1699  			mid, shard.seriesOnRetrieveBlock, gomock.Any()).
  1700  		Do(func(ctx context.Context, shard uint32, id ident.ID, at xtime.UnixNano,
  1701  			onRetrieve block.OnRetrieveBlock, nsCtx namespace.Context,
  1702  		) {
  1703  			go onRetrieve.OnRetrieveBlock(id, ident.EmptyTagIterator, at, segments[1], nsCtx)
  1704  		}).
  1705  		Return(blockReaders[1], nil)
  1706  
  1707  	// Check that reads return the expected blocks.
  1708  	iter, err := shard.ReadEncoded(ctx, ident.StringID("foo"), start, end, namespace.Context{})
  1709  	require.NoError(t, err)
  1710  	count := 0
  1711  	for iter.Next(ctx) {
  1712  		require.Equal(t, 1, len(iter.Current()))
  1713  		assert.Equal(t, blockReaders[count], iter.Current()[0])
  1714  		count++
  1715  	}
  1716  	require.Equal(t, 2, count)
  1717  	require.NoError(t, iter.Err())
  1718  
  1719  	// Check that the series gets cached.
  1720  	begin := time.Now()
  1721  	for time.Since(begin) < 10*time.Second {
  1722  		shard.RLock()
  1723  		entry, err := shard.lookupEntryWithLock(ident.StringID("foo"))
  1724  		shard.RUnlock()
  1725  		if err == errShardEntryNotFound {
  1726  			time.Sleep(5 * time.Millisecond)
  1727  			continue
  1728  		}
  1729  
  1730  		if err != nil || entry.Series.NumActiveBlocks() == 2 {
  1731  			// Stop polling once both blocks are active; errors are caught by the asserts below.
  1732  			break
  1733  		}
  1734  	}
  1735  
  1736  	shard.RLock()
  1737  	entry, err := shard.lookupEntryWithLock(ident.StringID("foo"))
  1738  	shard.RUnlock()
  1739  	require.NoError(t, err)
  1740  	require.NotNil(t, entry)
  1741  
  1742  	assert.False(t, entry.Series.IsEmpty())
  1743  	assert.Equal(t, 2, entry.Series.NumActiveBlocks())
  1744  }
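
        // Note on the polling loop above: OnRetrieveBlock is delivered on a
        // separate goroutine (see the go statements in the Stream expectations),
        // so the cached blocks only become visible some time after ReadEncoded
        // returns; hence the bounded retry loop rather than a single lookup.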
  1745  
  1746  func TestShardNewInvalidShardEntry(t *testing.T) {
  1747  	ctrl := xtest.NewController(t)
  1748  	defer ctrl.Finish()
  1749  
  1750  	shard := testDatabaseShard(t, DefaultTestOptions())
  1751  	defer shard.Close()
  1752  
  1753  	iter := ident.NewMockTagIterator(ctrl)
  1754  	gomock.InOrder(
  1755  		iter.EXPECT().Duplicate().Return(iter),
  1756  		iter.EXPECT().Remaining().Return(8),
  1757  		iter.EXPECT().Next().Return(false),
  1758  		iter.EXPECT().Err().Return(fmt.Errorf("random err")),
  1759  		iter.EXPECT().Close(),
  1760  	)
  1761  
  1762  	_, err := shard.newShardEntry(ident.StringID("abc"), convert.NewTagsIterMetadataResolver(iter))
  1763  	require.Error(t, err)
  1764  }
  1765  
  1766  func TestShardNewValidShardEntry(t *testing.T) {
  1767  	ctrl := xtest.NewController(t)
  1768  	defer ctrl.Finish()
  1769  
  1770  	shard := testDatabaseShard(t, DefaultTestOptions())
  1771  	defer shard.Close()
  1772  
  1773  	_, err := shard.newShardEntry(
  1774  		ident.StringID("abc"),
  1775  		convert.NewTagsIterMetadataResolver(ident.EmptyTagIterator),
  1776  	)
  1777  	require.NoError(t, err)
  1778  }
  1779  
  1780  // TestShardNewEntryDoesNotAlterIDOrTags tests that the ID and tags passed
  1781  // to newShardEntry are not altered. There are multiple callers that
  1782  // reuse the tag iterator passed all the way through to newShardEntry,
  1783  // either to retry inserting a series, or to finalize the tags at the
  1784  // end of a request/response cycle or a disk retrieve cycle.
  1785  func TestShardNewEntryDoesNotAlterIDOrTags(t *testing.T) {
  1786  	ctrl := xtest.NewController(t)
  1787  	defer ctrl.Finish()
  1788  
  1789  	shard := testDatabaseShard(t, DefaultTestOptions())
  1790  	defer shard.Close()
  1791  
  1792  	seriesID := ident.StringID("foo+bar=baz")
  1793  	seriesTags := ident.NewTags(ident.Tag{
  1794  		Name:  ident.StringID("bar"),
  1795  		Value: ident.StringID("baz"),
  1796  	})
  1797  
  1798  	// Ensure the ID is copied via a Bytes call but never closed, etc.
  1799  	id := ident.NewMockID(ctrl)
  1800  	id.EXPECT().Bytes().Times(1).Return(seriesID.Bytes())
  1801  
  1802  	iter := ident.NewMockTagIterator(ctrl)
  1803  
  1804  	// Ensure Duplicate is called but never Close, etc.
  1805  	iter.EXPECT().
  1806  		Duplicate().
  1807  		Times(1).
  1808  		Return(ident.NewTagsIterator(seriesTags))
  1809  
  1810  	entry, err := shard.newShardEntry(id, convert.NewTagsIterMetadataResolver(iter))
  1811  	require.NoError(t, err)
  1812  
  1813  	shard.Lock()
  1814  	shard.insertNewShardEntryWithLock(entry)
  1815  	shard.Unlock()
  1816  
  1817  	entry, _, err = shard.TryRetrieveSeriesAndIncrementReaderWriterCount(seriesID)
  1818  	require.NoError(t, err)
  1819  
  1820  	entryIDBytes := entry.Series.ID().Bytes()
  1821  	seriesIDBytes := seriesID.Bytes()
  1822  
  1823  	// Ensure ID equal and not same ref
  1824  	assert.True(t, entry.Series.ID().Equal(seriesID))
  1825  	// NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section
  1826  	assert.False(t, unsafe.Pointer(&entryIDBytes[0]) == unsafe.Pointer(&seriesIDBytes[0]))
  1827  }
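
        // The &slice[0] comparison above is a cheap way to prove two byte slices
        // do not share a backing array. An equivalent standalone helper
        // (illustrative sketch only, not part of the shard API):
        //
        //	func sharesBackingArray(a, b []byte) bool {
        //		return len(a) > 0 && len(b) > 0 &&
        //			unsafe.Pointer(&a[0]) == unsafe.Pointer(&b[0])
        //	}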
  1828  
  1829  func TestShardIterateBatchSize(t *testing.T) {
  1830  	smaller := shardIterateBatchMinSize - 1
  1831  	require.Equal(t, shardIterateBatchMinSize, iterateBatchSize(smaller))
  1832  
  1833  	require.Equal(t, shardIterateBatchMinSize, iterateBatchSize(shardIterateBatchMinSize+1))
  1834  
  1835  	require.True(t, shardIterateBatchMinSize < iterateBatchSize(2000))
  1836  }
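
        // The assertions above pin down iterateBatchSize's clamping behavior:
        // inputs at, below, or just above the minimum all yield
        // shardIterateBatchMinSize, while large inputs scale past it. A sketch
        // consistent with those assertions (assuming a fractional
        // shardIterateBatchPercent constant; not the verified implementation):
        //
        //	func iterateBatchSizeSketch(elemsLen int) int {
        //		target := math.Ceil(shardIterateBatchPercent * float64(elemsLen))
        //		return int(math.Max(float64(shardIterateBatchMinSize), target))
        //	}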
  1837  
  1838  func TestShardAggregateTiles(t *testing.T) {
  1839  	ctrl := xtest.NewController(t)
  1840  	defer ctrl.Finish()
  1841  
  1842  	ctx := context.NewBackground()
  1843  	defer ctx.Close()
  1844  
  1845  	var (
  1846  		targetBlockSize = 2 * time.Hour
  1847  		start           = xtime.Now().Truncate(targetBlockSize)
  1848  		opts            = AggregateTilesOptions{
  1849  			Start: start, End: start.Add(targetBlockSize), Step: 10 * time.Minute,
  1850  		}
  1851  
  1852  		expectedProcessedTileCount = int64(4)
  1853  
  1854  		err error
  1855  	)
  1856  
  1857  	aggregator := NewMockTileAggregator(ctrl)
  1858  	testOpts := DefaultTestOptions().SetTileAggregator(aggregator)
  1859  
  1860  	targetShard := testDatabaseShardWithIndexFn(t, testOpts, nil, true)
  1861  	defer func() { assert.NoError(t, targetShard.Close()) }()
  1862  
  1863  	var (
  1864  		noOpColdFlushNs = &persist.NoOpColdFlushNamespace{}
  1865  		sourceNs        = NewMockNamespace(ctrl)
  1866  		targetNs        = NewMockNamespace(ctrl)
  1867  	)
  1868  
  1869  	aggregator.EXPECT().
  1870  		AggregateTiles(ctx, sourceNs, targetNs, targetShard.ID(), noOpColdFlushNs, opts).
  1871  		Return(expectedProcessedTileCount, 33, nil)
  1872  
  1873  	processedTileCount, err := targetShard.AggregateTiles(
  1874  		ctx, sourceNs, targetNs, targetShard.ID(), noOpColdFlushNs, opts)
  1875  	require.NoError(t, err)
  1876  	assert.Equal(t, expectedProcessedTileCount, processedTileCount)
  1877  
  1878  	flushState, ok := targetShard.flushState.statesByTime[start]
  1879  	require.True(t, ok)
  1880  	assert.Equal(t, fileOpSuccess, flushState.WarmStatus.DataFlushed)
  1881  	assert.Equal(t, fileOpSuccess, flushState.WarmStatus.IndexFlushed)
  1882  	assert.Zero(t, flushState.NumFailures)
  1883  }
  1884  
  1885  func TestOpenStreamingReader(t *testing.T) {
  1886  	ctrl := xtest.NewController(t)
  1887  	defer ctrl.Finish()
  1888  
  1889  	var (
  1890  		blockStart = xtime.Now().Truncate(time.Hour)
  1891  		testOpts   = DefaultTestOptions()
  1892  	)
  1893  
  1894  	shard := testDatabaseShard(t, testOpts)
  1895  	defer func() { assert.NoError(t, shard.Close()) }()
  1896  
  1897  	latestSourceVolume, err := shard.LatestVolume(blockStart)
  1898  	require.NoError(t, err)
  1899  
  1900  	openOpts := fs.DataReaderOpenOptions{
  1901  		Identifier: fs.FileSetFileIdentifier{
  1902  			Namespace:   shard.namespace.ID(),
  1903  			Shard:       shard.ID(),
  1904  			BlockStart:  blockStart,
  1905  			VolumeIndex: latestSourceVolume,
  1906  		},
  1907  		FileSetType:      persist.FileSetFlushType,
  1908  		StreamingEnabled: true,
  1909  	}
  1910  
  1911  	reader := fs.NewMockDataFileSetReader(ctrl)
  1912  	reader.EXPECT().Open(openOpts).Return(nil)
  1913  
  1914  	shard.newReaderFn = func(pool.CheckedBytesPool, fs.Options) (fs.DataFileSetReader, error) {
  1915  		return reader, nil
  1916  	}
  1917  
  1918  	_, err = shard.OpenStreamingReader(blockStart)
  1919  	require.NoError(t, err)
  1920  }
  1921  
  1922  func TestSeriesRefResolver(t *testing.T) {
  1923  	ctrl := xtest.NewController(t)
  1924  	shard := testDatabaseShard(t, DefaultTestOptions())
  1925  	ctx := context.NewBackground()
  1926  	defer func() {
  1927  		ctrl.Finish()
  1928  		_ = shard.Close()
  1929  		ctx.Close()
  1930  	}()
  1931  
  1932  	seriesID := ident.StringID("foo+bar=baz")
  1933  	seriesTags := ident.NewTags(ident.Tag{
  1934  		Name:  ident.StringID("bar"),
  1935  		Value: ident.StringID("baz"),
  1936  	})
  1937  
  1938  	iter := ident.NewMockTagIterator(ctrl)
  1939  	// Ensure Duplicate is called but never Close, etc.
  1940  	iter.EXPECT().
  1941  		Duplicate().
  1942  		Return(ident.NewTagsIterator(seriesTags))
  1943  
  1944  	now := xtime.Now()
  1945  
  1946  	resolver, err := shard.SeriesRefResolver(seriesID, iter)
  1947  	require.NoError(t, err)
  1948  	seriesRef, err := resolver.SeriesRef()
  1949  	require.NoError(t, err)
  1950  	write, writeType, err := seriesRef.Write(ctx, now, 1.0, xtime.Second,
  1951  		[]byte("annotation1"), series.WriteOptions{})
  1952  	require.NoError(t, err)
  1953  	require.Equal(t, series.WarmWrite, writeType)
  1954  	require.True(t, write)
  1955  
  1956  	// Should return the already-inserted entry as the series ref.
  1957  	resolverEntry, err := shard.SeriesRefResolver(seriesID, iter)
  1958  	require.NoError(t, err)
  1959  	require.IsType(t, &Entry{}, resolverEntry)
  1960  	refEntry, err := resolverEntry.SeriesRef()
  1961  	require.NoError(t, err)
  1962  	require.Equal(t, seriesRef, refEntry)
  1963  
  1964  	databaseBlock := block.NewMockDatabaseBlock(ctrl)
  1965  	databaseBlock.EXPECT().StartTime().Return(now).AnyTimes()
  1966  	err = seriesRef.LoadBlock(databaseBlock, series.ColdWrite)
  1967  	require.NoError(t, err)
  1968  
  1969  	resolver.ReleaseRef()
  1970  	resolverEntry.ReleaseRef()
  1971  	entry := seriesRef.(*Entry)
  1972  	require.Zero(t, entry.ReaderWriterCount())
  1973  }
  1974  
  1975  // TestSeriesRefResolverAsync tests async resolver creation/closure for the same series
  1976  // to validate proper ref counting.
  1977  func TestSeriesRefResolverAsync(t *testing.T) {
  1978  	ctrl := xtest.NewController(t)
  1979  	shard := testDatabaseShard(t, DefaultTestOptions())
  1980  	ctx := context.NewBackground()
  1981  	defer func() {
  1982  		ctrl.Finish()
  1983  		_ = shard.Close()
  1984  		ctx.Close()
  1985  	}()
  1986  
  1987  	seriesID := ident.StringID("foo+bar=baz")
  1988  	seriesTags := ident.NewTags(ident.Tag{
  1989  		Name:  ident.StringID("bar"),
  1990  		Value: ident.StringID("baz"),
  1991  	})
  1992  
  1993  	iter := ident.NewTagsIterator(seriesTags)
  1994  
  1995  	// This resolution path is async due to the use of the index insert queue.
  1996  	// When many entries for the same series are queued up at once, only one
  1997  	// is persisted in the shard map. We induce this by spinning up N goroutines
  1998  	// that each insert the same series, releasing them all at once.
  1999  	// We then verify at the end that the ref counts are correctly freed for the
  2000  	// entry ultimately in the shard map (i.e. it should have zero outstanding after
  2001  	// every resolver is closed).
  2002  	var (
  2003  		start  sync.WaitGroup
  2004  		finish sync.WaitGroup
  2005  	)
  2006  	start.Add(1)
  2007  	for i := 0; i < 100; i++ {
  2008  		i := i
  2009  		finish.Add(1)
  2010  		go func() {
  2011  			start.Wait()
  2012  
  2013  			resolver, err := shard.SeriesRefResolver(seriesID, iter)
  2014  			require.NoError(t, err)
  2015  
  2016  			if i%2 == 0 {
  2017  				// Half the time exercise the ref retrieval path.
  2018  				_, err = resolver.SeriesRef()
  2019  				require.NoError(t, err)
  2020  			}
  2021  
  2022  			resolver.ReleaseRef()
  2023  
  2024  			finish.Done()
  2025  		}()
  2026  	}
  2027  
  2028  	start.Done()
  2029  	finish.Wait()
  2030  
  2031  	entryInShard, _, err := shard.TryRetrieveSeriesAndIncrementReaderWriterCount(seriesID)
  2032  	require.NoError(t, err)
  2033  	entryInShard.DecrementReaderWriterCount() // Decrement because the above retrieval increments.
  2034  	require.Equal(t, int32(0), entryInShard.ReaderWriterCount())
  2035  }
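
        // The invariant exercised above: every SeriesRefResolver checkout must be
        // paired with exactly one ReleaseRef, whether or not SeriesRef was called,
        // so the entry that wins the insert race drains back to a zero
        // reader-writer count. A typical caller pattern (illustrative only):
        //
        //	resolver, err := shard.SeriesRefResolver(id, tags)
        //	if err != nil {
        //		return err
        //	}
        //	defer resolver.ReleaseRef() // release even if SeriesRef is never called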
  2036  
  2037  func TestFilterBlocksNeedSnapshot(t *testing.T) {
  2038  	bootstrapping := Bootstrapping
  2039  	for _, tc := range []struct {
  2040  		name              string
  2041  		blocks            []int
  2042  		expectedSnapshots []int
  2043  		seriesWritten     [][]int
  2044  		bootstrapState    *BootstrapState
  2045  	}{
  2046  		{
  2047  			name:              "all blocks are empty",
  2048  			seriesWritten:     [][]int{},
  2049  			blocks:            []int{-2, -1, 0},
  2050  			expectedSnapshots: []int{},
  2051  		},
  2052  		{
  2053  			name:              "all blocks snapshotable",
  2054  			seriesWritten:     [][]int{{-2, -1, 0}},
  2055  			blocks:            []int{-2, -1, 0},
  2056  			expectedSnapshots: []int{-2, -1, 0},
  2057  		},
  2058  		{
  2059  			name:              "different series written to different blocks",
  2060  			seriesWritten:     [][]int{{0}, {-1}, {-2}},
  2061  			blocks:            []int{-2, -1, 0},
  2062  			expectedSnapshots: []int{-2, -1, 0},
  2063  		},
  2064  		{
  2065  			name:              "different series written to different blocks single block checked",
  2066  			seriesWritten:     [][]int{{0}, {-1}, {-2}},
  2067  			blocks:            []int{-1},
  2068  			expectedSnapshots: []int{-1},
  2069  		},
  2070  		{
  2071  			name: "requested blocks are satisfied only by single series",
  2072  			seriesWritten: [][]int{
  2073  				{-2, 0},
  2074  				{-1},
  2075  			},
  2076  			blocks:            []int{-1},
  2077  			expectedSnapshots: []int{-1},
  2078  		},
  2079  		{
  2080  			name:              "current block is snapshotable",
  2081  			seriesWritten:     [][]int{{0}},
  2082  			blocks:            []int{0},
  2083  			expectedSnapshots: []int{0},
  2084  		},
  2085  		{
  2086  			name:              "no blocks are snapshotable when not bootstrapped",
  2087  			seriesWritten:     [][]int{{0}},
  2088  			blocks:            []int{0},
  2089  			expectedSnapshots: []int{},
  2090  			bootstrapState:    &bootstrapping,
  2091  		},
  2092  		{
  2093  			name:              "previous block is not snapshotable",
  2094  			seriesWritten:     [][]int{{0}},
  2095  			blocks:            []int{-1, 0},
  2096  			expectedSnapshots: []int{0},
  2097  		},
  2098  		{
  2099  			name:              "cold write to a previous block",
  2100  			seriesWritten:     [][]int{{-1}},
  2101  			blocks:            []int{-1, 0},
  2102  			expectedSnapshots: []int{-1},
  2103  		},
  2104  		{
  2105  			name:              "order not lost after filtering",
  2106  			seriesWritten:     [][]int{{0, -1, -2}},
  2107  			blocks:            []int{-1, -2, 0},
  2108  			expectedSnapshots: []int{-1, -2, 0},
  2109  		},
  2110  	} {
  2111  		tc := tc
  2112  		t.Run(tc.name, func(t *testing.T) {
  2113  			var (
  2114  				now           = xtime.Now()
  2115  				opts          = DefaultTestOptions()
  2116  				shard         = testDatabaseShardWithIndexFn(t, opts, nil, true)
  2117  				ctx           = context.NewBackground()
  2118  				blockSize     = shard.namespace.Options().RetentionOptions().BlockSize()
  2119  				getBlockStart = func(idx int) xtime.UnixNano {
  2120  					return now.Add(time.Duration(idx) * blockSize).Truncate(blockSize)
  2121  				}
  2122  				toBlockStarts = func(idxs []int) []xtime.UnixNano {
  2123  					var r []xtime.UnixNano
  2124  					for _, blockNum := range idxs {
  2125  						r = append(r, getBlockStart(blockNum))
  2126  					}
  2127  					return r
  2128  				}
  2129  			)
  2130  			shard.bootstrapState = Bootstrapped
  2131  			if tc.bootstrapState != nil {
  2132  				shard.bootstrapState = *tc.bootstrapState
  2133  			}
  2134  			defer func() {
  2135  				require.NoError(t, shard.Close())
  2136  				ctx.Close()
  2137  			}()
  2138  
  2139  			for idx, seriesWritten := range tc.seriesWritten {
  2140  				for _, blockNum := range seriesWritten {
  2141  					timestamp := getBlockStart(blockNum).Add(time.Second)
  2142  					_, err := shard.Write(ctx, ident.StringID(fmt.Sprintf("foo.%d", idx)),
  2143  						timestamp, 1.0, xtime.Second, nil, series.WriteOptions{},
  2144  					)
  2145  					require.NoError(t, err)
  2146  				}
  2147  			}
  2148  
  2149  			res := shard.FilterBlocksNeedSnapshot(toBlockStarts(tc.blocks))
  2150  			if len(tc.expectedSnapshots) == 0 {
  2151  				assert.Empty(t, res)
  2152  			} else {
  2153  				assert.Equal(t, toBlockStarts(tc.expectedSnapshots), res)
  2154  			}
  2155  		})
  2156  	}
  2157  }
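
        // In the table-driven cases above, block indexes are relative to now: 0 is
        // the current block, -1 the previous one, and so on; getBlockStart converts
        // an index into an aligned block start. For example, assuming a 2h block
        // size and now at 14:37, index -1 maps to the 12:00 block start and index 0
        // to 14:00.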