github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/storage/series/buffer_test.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package series

import (
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/persist"
	m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
	"github.com/m3db/m3/src/dbnode/storage/block"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/x/xio"
	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/x/checked"
	"github.com/m3db/m3/src/x/clock"
	"github.com/m3db/m3/src/x/context"
	xerrors "github.com/m3db/m3/src/x/errors"
	"github.com/m3db/m3/src/x/ident"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var testID = ident.StringID("foo")

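// newBufferTestOptions returns Options suitable for buffer tests: pooled
// M3TSZ encoders and multi-reader iterators, buffer bucket pools, and a
// 2-minute block size with 10-second buffer past/future windows.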
func newBufferTestOptions() Options {
	encoderPool := encoding.NewEncoderPool(nil)
	multiReaderIteratorPool := encoding.NewMultiReaderIteratorPool(nil)

	encodingOpts := encoding.NewOptions().SetEncoderPool(encoderPool)

	encoderPool.Init(func() encoding.Encoder {
		return m3tsz.NewEncoder(0, nil, m3tsz.DefaultIntOptimizationEnabled, encodingOpts)
	})
	multiReaderIteratorPool.Init(m3tsz.DefaultReaderIteratorAllocFn(encodingOpts))

	bufferBucketPool := NewBufferBucketPool(nil)
	bufferBucketVersionsPool := NewBufferBucketVersionsPool(nil)

	opts := NewOptions().
		SetEncoderPool(encoderPool).
		SetMultiReaderIteratorPool(multiReaderIteratorPool).
		SetBufferBucketPool(bufferBucketPool).
		SetBufferBucketVersionsPool(bufferBucketVersionsPool).
		SetRuntimeOptionsManager(m3dbruntime.NewOptionsManager())
	opts = opts.
		SetRetentionOptions(opts.RetentionOptions().
			SetBlockSize(2 * time.Minute).
			SetBufferFuture(10 * time.Second).
			SetBufferPast(10 * time.Second)).
		SetDatabaseBlockOptions(opts.DatabaseBlockOptions().
			SetContextPool(opts.ContextPool()).
			SetEncoderPool(opts.EncoderPool()))
	return opts
}

// Writes to the buffer, verifying that there is no error and that the value
// was written.
func verifyWriteToBufferSuccess(
	t *testing.T,
	id ident.ID,
	buffer databaseBuffer,
	v DecodedTestValue,
	schema namespace.SchemaDescr,
) {
	verifyWriteToBuffer(t, id, buffer, v, schema, true, false)
}

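// verifyWriteToBuffer writes a single value to the buffer and asserts the
// expected written/error outcome.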
func verifyWriteToBuffer(
	t *testing.T,
	id ident.ID,
	buffer databaseBuffer,
	v DecodedTestValue,
	schema namespace.SchemaDescr,
	expectWritten bool,
	expectErr bool,
) {
	ctx := context.NewBackground()
	defer ctx.Close()

	wasWritten, _, err := buffer.Write(ctx, id, v.Timestamp, v.Value, v.Unit,
		v.Annotation, WriteOptions{SchemaDesc: schema})

	if expectErr {
		require.Error(t, err)
	} else {
		require.NoError(t, err)
	}
	require.Equal(t, expectWritten, wasWritten)
}

func TestBufferWriteTooFuture(t *testing.T) {
	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	ctx := context.NewBackground()
	defer ctx.Close()

	wasWritten, _, err := buffer.Write(ctx, testID, curr.Add(rops.BufferFuture()), 1,
		xtime.Second, nil, WriteOptions{})
	assert.False(t, wasWritten)
	assert.Error(t, err)
	assert.True(t, xerrors.IsInvalidParams(err))
	assert.True(t, strings.Contains(err.Error(), "datapoint too far in future"))
	assert.True(t, strings.Contains(err.Error(), "id=foo"))
	assert.True(t, strings.Contains(err.Error(), "timestamp="))
	assert.True(t, strings.Contains(err.Error(), "future_limit="))
}

func TestBufferWriteTooPast(t *testing.T) {
	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	ctx := context.NewBackground()
	defer ctx.Close()
	// Writes are inclusive on the buffer-past start border, so a write must be
	// before that inclusive border to be a cold write. To test this we write
	// one second further into the past.
	wasWritten, _, err := buffer.Write(ctx, testID,
		curr.Add(-1*rops.BufferPast()-time.Second), 1, xtime.Second,
		nil, WriteOptions{})
	assert.False(t, wasWritten)
	assert.Error(t, err)
	assert.True(t, xerrors.IsInvalidParams(err))
	assert.True(t, strings.Contains(err.Error(), "datapoint too far in past"))
	assert.True(t, strings.Contains(err.Error(), "id=foo"))
	assert.True(t, strings.Contains(err.Error(), "timestamp="))
	assert.True(t, strings.Contains(err.Error(), "past_limit="))
}

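// The border semantics above imply that a datapoint exactly at
// curr.Add(-rops.BufferPast()) is still accepted. A minimal sketch of that
// boundary case (a hypothetical assertion, not part of the original suite,
// assuming the same buffer setup as TestBufferWriteTooPast):
//
//	wasWritten, _, err := buffer.Write(ctx, testID,
//		curr.Add(-1*rops.BufferPast()), 1, xtime.Second, nil, WriteOptions{})
//	require.NoError(t, err)
//	require.True(t, wasWritten)
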
func maxDuration(a, b time.Duration) time.Duration {
	if a > b {
		return a
	}
	return b
}

func TestBufferWriteColdTooFutureRetention(t *testing.T) {
	opts := newBufferTestOptions().SetColdWritesEnabled(true)
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	ctx := context.NewBackground()
	defer ctx.Close()

	futureRetention := time.Second +
		maxDuration(rops.BufferFuture(), rops.FutureRetentionPeriod())
	wasWritten, _, err := buffer.Write(ctx,
		testID, curr.Add(futureRetention), 1, xtime.Second, nil, WriteOptions{})
	assert.False(t, wasWritten)
	assert.Error(t, err)
	assert.True(t, xerrors.IsInvalidParams(err))
	assert.True(t, strings.Contains(err.Error(), "datapoint too far in future and out of retention"))
	assert.True(t, strings.Contains(err.Error(), "id=foo"))
	assert.True(t, strings.Contains(err.Error(), "timestamp="))
	assert.True(t, strings.Contains(err.Error(), "retention_future_limit="))
}

func TestBufferWriteColdTooPastRetention(t *testing.T) {
	opts := newBufferTestOptions().SetColdWritesEnabled(true)
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	ctx := context.NewBackground()
	defer ctx.Close()

	pastRetention := time.Second +
		maxDuration(rops.BufferPast(), rops.RetentionPeriod())
	wasWritten, _, err := buffer.Write(ctx, testID,
		curr.Add(-pastRetention), 1, xtime.Second,
		nil, WriteOptions{})
	assert.False(t, wasWritten)
	assert.Error(t, err)
	assert.True(t, xerrors.IsInvalidParams(err))
	assert.True(t, strings.Contains(err.Error(), "datapoint too far in past and out of retention"))
	assert.True(t, strings.Contains(err.Error(), "id=foo"))
	assert.True(t, strings.Contains(err.Error(), "timestamp="))
	assert.True(t, strings.Contains(err.Error(), "retention_past_limit="))
}

func TestBufferWriteError(t *testing.T) {
	var (
		opts   = newBufferTestOptions()
		rops   = opts.RetentionOptions()
		curr   = xtime.Now().Truncate(rops.BlockSize())
		ctx    = context.NewBackground()
		buffer = newDatabaseBuffer().(*dbBuffer)
	)
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	defer ctx.Close()

	timeUnitNotExist := xtime.Unit(127)
	wasWritten, _, err := buffer.Write(ctx, testID,
		curr, 1, timeUnitNotExist, nil, WriteOptions{})
	require.False(t, wasWritten)
	require.Error(t, err)
}

func TestBufferWriteRead(t *testing.T) {
	opts := newBufferTestOptions()
	testBufferWriteRead(t, opts, nil)
}

func testBufferWriteRead(t *testing.T, opts Options, setAnn setAnnotation) {
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	data := []DecodedTestValue{
		{curr.Add(secs(1)), 1, xtime.Second, nil},
		{curr.Add(secs(2)), 2, xtime.Second, nil},
		{curr.Add(secs(3)), 3, xtime.Second, nil},
	}
	var nsCtx namespace.Context
	if setAnn != nil {
		data = setAnn(data)
		nsCtx = namespace.Context{Schema: testSchemaDesc}
	}

	for _, v := range data {
		verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema)
	}

	ctx := context.NewBackground()
	defer ctx.Close()

	results, err := buffer.ReadEncoded(ctx, 0, timeDistantFuture, nsCtx)
	assert.NoError(t, err)
	assert.NotNil(t, results)

	requireReaderValuesEqual(t, data, results, opts, nsCtx)
}

func TestBufferReadOnlyMatchingBuckets(t *testing.T) {
	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	start := curr
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	data := []DecodedTestValue{
		{curr.Add(mins(1)), 1, xtime.Second, nil},
		{curr.Add(mins(3)), 2, xtime.Second, nil},
	}

	for _, v := range data {
		curr = v.Timestamp
		verifyWriteToBufferSuccess(t, testID, buffer, v, nil)
	}

	ctx := context.NewBackground()
	defer ctx.Close()

	firstBucketStart := start.Truncate(time.Second)
	firstBucketEnd := start.Add(mins(2)).Truncate(time.Second)
	results, err := buffer.ReadEncoded(ctx, firstBucketStart, firstBucketEnd, namespace.Context{})
	assert.NoError(t, err)
	assert.NotNil(t, results)
	requireReaderValuesEqual(t, []DecodedTestValue{data[0]}, results, opts, namespace.Context{})

	secondBucketStart := start.Add(mins(2)).Truncate(time.Second)
	secondBucketEnd := start.Add(mins(4)).Truncate(time.Second)
	results, err = buffer.ReadEncoded(ctx, secondBucketStart, secondBucketEnd, namespace.Context{})
	assert.NoError(t, err)
	assert.NotNil(t, results)

	requireReaderValuesEqual(t, []DecodedTestValue{data[1]}, results, opts, namespace.Context{})
}

func TestBufferWriteOutOfOrder(t *testing.T) {
	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	start := xtime.Now().Truncate(rops.BlockSize())
	curr := start
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	data := []DecodedTestValue{
		{curr, 1, xtime.Second, nil},
		{curr.Add(secs(10)), 2, xtime.Second, nil},
		{curr.Add(secs(5)), 3, xtime.Second, nil},
	}

	for _, v := range data {
		if v.Timestamp.After(curr) {
			curr = v.Timestamp
		}
		verifyWriteToBufferSuccess(t, testID, buffer, v, nil)
	}

	buckets, ok := buffer.bucketVersionsAt(start)
	require.True(t, ok)
	bucket, ok := buckets.writableBucket(WarmWrite)
	require.True(t, ok)
	assert.Equal(t, 2, len(bucket.encoders))
	assert.Equal(t, data[1].Timestamp, mustGetLastEncoded(t, bucket.encoders[0]).TimestampNanos)
	assert.Equal(t, data[2].Timestamp, mustGetLastEncoded(t, bucket.encoders[1]).TimestampNanos)

	// Restore data to in-order for comparison.
	sort.Sort(ValuesByTime(data))

	ctx := context.NewBackground()
	defer ctx.Close()

	results, err := buffer.ReadEncoded(ctx, 0, timeDistantFuture, namespace.Context{})
	assert.NoError(t, err)
	assert.NotNil(t, results)

	requireReaderValuesEqual(t, data, results, opts, namespace.Context{})
}

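// newTestBufferBucketWithData builds a warm-write BufferBucket populated with
// four overlapping in-order encoders of test values for the current block,
// returning the bucket along with the expected values sorted by time.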
func newTestBufferBucketWithData(t *testing.T,
	opts Options, setAnn setAnnotation,
) (*BufferBucket, []DecodedTestValue) {
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())

	bd := blockData{
		start:     curr,
		writeType: WarmWrite,
		data: [][]DecodedTestValue{
			{
				{curr, 1, xtime.Second, nil},
				{curr.Add(secs(10)), 2, xtime.Second, nil},
				{curr.Add(secs(50)), 3, xtime.Second, nil},
			},
			{
				{curr.Add(secs(20)), 4, xtime.Second, nil},
				{curr.Add(secs(40)), 5, xtime.Second, nil},
				{curr.Add(secs(60)), 6, xtime.Second, nil},
			},
			{
				{curr.Add(secs(30)), 4, xtime.Second, nil},
				{curr.Add(secs(70)), 5, xtime.Second, nil},
			},
			{
				{curr.Add(secs(35)), 6, xtime.Second, nil},
			},
		},
	}

	return newTestBufferBucketWithCustomData(t, bd, opts, setAnn)
}

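// newTestBufferBucketWithCustomData builds a BufferBucket from the provided
// blockData, encoding each inner slice of values into its own in-order
// encoder, and returns the expected values sorted by time.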
func newTestBufferBucketWithCustomData(
	t *testing.T,
	bd blockData,
	opts Options,
	setAnn setAnnotation,
) (*BufferBucket, []DecodedTestValue) {
	b := &BufferBucket{opts: opts}
	b.resetTo(bd.start, bd.writeType, opts)
	b.firstWrite = xtime.ToUnixNano(opts.ClockOptions().NowFn()())
	data := bd.data

	// Empty all existing encoders.
	b.encoders = nil

	var nsCtx namespace.Context
	if setAnn != nil {
		nsCtx = namespace.Context{Schema: testSchemaDesc}
	}
	var expected []DecodedTestValue
	for i := 0; i < len(data); i++ {
		if setAnn != nil {
			data[i] = setAnn(data[i])
		}

		encoded := 0
		encoder := opts.EncoderPool().Get()
		encoder.Reset(bd.start, 0, nsCtx.Schema)
		for _, v := range data[i] {
			dp := ts.Datapoint{
				TimestampNanos: v.Timestamp,
				Value:          v.Value,
			}
			err := encoder.Encode(dp, v.Unit, v.Annotation)
			require.NoError(t, err)
			encoded++
		}
		b.encoders = append(b.encoders, inOrderEncoder{encoder: encoder})
		expected = append(expected, data[i]...)
	}
	sort.Sort(ValuesByTime(expected))
	return b, expected
}

func newTestBufferBucketsWithData(t *testing.T, opts Options,
	setAnn setAnnotation,
) (*BufferBucketVersions, []DecodedTestValue) {
	newBucket, vals := newTestBufferBucketWithData(t, opts, setAnn)
	return &BufferBucketVersions{
		buckets: []*BufferBucket{newBucket},
		start:   newBucket.start,
		opts:    opts,
	}, vals
}

func newTestBufferBucketVersionsWithCustomData(
	t *testing.T,
	bd blockData,
	opts Options,
	setAnn setAnnotation,
) (*BufferBucketVersions, []DecodedTestValue) {
	newBucket, vals := newTestBufferBucketWithCustomData(t, bd, opts, setAnn)
	return &BufferBucketVersions{
		buckets:    []*BufferBucket{newBucket},
		start:      newBucket.start,
		opts:       opts,
		bucketPool: opts.BufferBucketPool(),
	}, vals
}

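// newTestBufferWithCustomData builds a dbBuffer whose bucketsMap contains one
// BufferBucketVersions per provided blockData, returning the buffer and a map
// of expected values keyed by block start.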
func newTestBufferWithCustomData(
	t *testing.T,
	blockDatas []blockData,
	opts Options,
	setAnn setAnnotation,
) (*dbBuffer, map[xtime.UnixNano][]DecodedTestValue) {
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	expectedMap := make(map[xtime.UnixNano][]DecodedTestValue)

	for _, bd := range blockDatas {
		bucketVersions, expected := newTestBufferBucketVersionsWithCustomData(t, bd, opts, setAnn)
		buffer.bucketsMap[bd.start] = bucketVersions
		expectedMap[bd.start] = expected
	}

	return buffer, expectedMap
}

func TestBufferBucketMerge(t *testing.T) {
	opts := newBufferTestOptions()

	testBufferBucketMerge(t, opts, nil)
}

func testBufferBucketMerge(t *testing.T, opts Options, setAnn setAnnotation) {
	b, expected := newTestBufferBucketWithData(t, opts, setAnn)

	ctx := context.NewBackground()
	defer ctx.Close()

	nsCtx := namespace.Context{}
	if setAnn != nil {
		nsCtx.Schema = testSchemaDesc
	}
	sr, ok, err := b.mergeToStream(ctx, nsCtx)

	require.NoError(t, err)
	require.True(t, ok)

	requireReaderValuesEqual(t, expected, [][]xio.BlockReader{{
		{
			SegmentReader: sr,
		},
	}}, opts, nsCtx)
}

func TestBufferBucketMergeNilEncoderStreams(t *testing.T) {
	opts := newBufferTestOptions()
	ropts := opts.RetentionOptions()
	curr := xtime.Now().Truncate(ropts.BlockSize())

	b := &BufferBucket{}
	b.resetTo(curr, WarmWrite, opts)
	emptyEncoder := opts.EncoderPool().Get()
	emptyEncoder.Reset(curr, 0, nil)
	b.encoders = append(b.encoders, inOrderEncoder{encoder: emptyEncoder})

	ctx := opts.ContextPool().Get()
	defer ctx.Close()

	_, ok := b.encoders[0].encoder.Stream(ctx)
	require.False(t, ok)

	encoder := opts.EncoderPool().Get()
	encoder.Reset(curr, 0, nil)

	value := ts.Datapoint{TimestampNanos: curr, Value: 1.0}
	err := encoder.Encode(value, xtime.Second, nil)
	require.NoError(t, err)

	blopts := opts.DatabaseBlockOptions()
	newBlock := block.NewDatabaseBlock(curr, 0, encoder.Discard(), blopts, namespace.Context{})
	b.loadedBlocks = append(b.loadedBlocks, newBlock)

	stream, err := b.loadedBlocks[0].Stream(ctx)
	require.NoError(t, err)
	require.NotNil(t, stream)

	mergeRes, err := b.merge(namespace.Context{})
	require.NoError(t, err)
	assert.Equal(t, 1, mergeRes)
	assert.Equal(t, 1, len(b.encoders))
	assert.Equal(t, 0, len(b.loadedBlocks))
}

func TestBufferBucketWriteDuplicateUpserts(t *testing.T) {
	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())

	b := &BufferBucket{}
	b.resetTo(curr, WarmWrite, opts)

	data := [][]DecodedTestValue{
		{
			{curr, 1, xtime.Second, nil},
			{curr.Add(secs(10)), 2, xtime.Second, nil},
			{curr.Add(secs(50)), 3, xtime.Second, nil},
			{curr.Add(secs(50)), 4, xtime.Second, nil},
		},
		{
			{curr.Add(secs(10)), 5, xtime.Second, nil},
			{curr.Add(secs(40)), 6, xtime.Second, nil},
			{curr.Add(secs(60)), 7, xtime.Second, nil},
		},
		{
			{curr.Add(secs(40)), 8, xtime.Second, nil},
			{curr.Add(secs(70)), 9, xtime.Second, nil},
		},
		{
			{curr.Add(secs(10)), 10, xtime.Second, nil},
			{curr.Add(secs(80)), 11, xtime.Second, nil},
		},
	}

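	// Duplicate timestamps resolve as last-write-wins: e.g. the point at
	// secs(10) is written with values 2, 5, and then 10, so 10 survives.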
	expected := []DecodedTestValue{
		{curr, 1, xtime.Second, nil},
		{curr.Add(secs(10)), 10, xtime.Second, nil},
		{curr.Add(secs(40)), 8, xtime.Second, nil},
		{curr.Add(secs(50)), 4, xtime.Second, nil},
		{curr.Add(secs(60)), 7, xtime.Second, nil},
		{curr.Add(secs(70)), 9, xtime.Second, nil},
		{curr.Add(secs(80)), 11, xtime.Second, nil},
	}

	for _, values := range data {
		for _, value := range values {
			wasWritten, err := b.write(value.Timestamp, value.Value,
				value.Unit, value.Annotation, nil)
			require.NoError(t, err)
			require.True(t, wasWritten)
		}
	}

	// First assert that the streams() call is correct.
	ctx := context.NewBackground()
	defer ctx.Close()

	result := b.streams(ctx)
	require.NotNil(t, result)

	results := [][]xio.BlockReader{result}

	requireReaderValuesEqual(t, expected, results, opts, namespace.Context{})

	// Now assert that mergeToStream() returns the same expected result.
	stream, ok, err := b.mergeToStream(ctx, namespace.Context{})
	require.NoError(t, err)
	require.True(t, ok)
	requireSegmentValuesEqual(t, expected, []xio.SegmentReader{stream}, opts, namespace.Context{})
}

func TestBufferBucketDuplicatePointsNotWrittenButUpserted(t *testing.T) {
	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())

	b := &BufferBucket{opts: opts}
	b.resetTo(curr, WarmWrite, opts)

	type dataWithShouldWrite struct {
		v DecodedTestValue
		w bool
	}

	data := [][]dataWithShouldWrite{
		{
			{w: true, v: DecodedTestValue{curr, 1, xtime.Second, nil}},
			{w: false, v: DecodedTestValue{curr, 1, xtime.Second, nil}},
			{w: false, v: DecodedTestValue{curr, 1, xtime.Second, nil}},
			{w: false, v: DecodedTestValue{curr, 1, xtime.Second, nil}},
			{w: true, v: DecodedTestValue{curr.Add(secs(10)), 2, xtime.Second, nil}},
		},
		{
			{w: true, v: DecodedTestValue{curr, 1, xtime.Second, nil}},
			{w: false, v: DecodedTestValue{curr.Add(secs(10)), 2, xtime.Second, nil}},
			{w: true, v: DecodedTestValue{curr.Add(secs(10)), 5, xtime.Second, nil}},
		},
		{
			{w: true, v: DecodedTestValue{curr, 1, xtime.Second, nil}},
			{w: true, v: DecodedTestValue{curr.Add(secs(20)), 8, xtime.Second, nil}},
		},
		{
			{w: true, v: DecodedTestValue{curr, 10, xtime.Second, nil}},
			{w: true, v: DecodedTestValue{curr.Add(secs(20)), 10, xtime.Second, nil}},
		},
	}

	expected := []DecodedTestValue{
		{curr, 10, xtime.Second, nil},
		{curr.Add(secs(10)), 5, xtime.Second, nil},
		{curr.Add(secs(20)), 10, xtime.Second, nil},
	}

	for _, valuesWithMeta := range data {
		for _, valueWithMeta := range valuesWithMeta {
			value := valueWithMeta.v
			wasWritten, err := b.write(value.Timestamp, value.Value,
				value.Unit, value.Annotation, nil)
			require.NoError(t, err)
			assert.Equal(t, valueWithMeta.w, wasWritten)
		}
	}

	// First assert that the streams() call is correct.
	ctx := context.NewBackground()
	defer ctx.Close()

	result := b.streams(ctx)
	require.NotNil(t, result)

	results := [][]xio.BlockReader{result}

	requireReaderValuesEqual(t, expected, results, opts, namespace.Context{})

	// Now assert that mergeToStream() returns the same expected result.
	stream, ok, err := b.mergeToStream(ctx, namespace.Context{})
	require.NoError(t, err)
	require.True(t, ok)
	requireSegmentValuesEqual(t, expected, []xio.SegmentReader{stream}, opts, namespace.Context{})
}

func TestIndexedBufferWriteOnlyWritesSinglePoint(t *testing.T) {
	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	data := []DecodedTestValue{
		{curr.Add(secs(1)), 1, xtime.Second, nil},
		{curr.Add(secs(2)), 2, xtime.Second, nil},
		{curr.Add(secs(3)), 3, xtime.Second, nil},
	}

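	// With TruncateType TypeBlock every timestamp truncates to the block
	// start and ForceValue pins the value, so after the first write each
	// subsequent write is a duplicate upsert of the same point and should
	// report wasWritten=false.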
	forceValue := 1.0
	for i, v := range data {
		ctx := context.NewBackground()
		writeOpts := WriteOptions{
			TruncateType: TypeBlock,
			TransformOptions: WriteTransformOptions{
				ForceValueEnabled: true,
				ForceValue:        forceValue,
			},
		}
		wasWritten, _, err := buffer.Write(ctx, testID,
			v.Timestamp, v.Value, v.Unit,
			v.Annotation, writeOpts)
		require.NoError(t, err)
		expectedWrite := i == 0
		require.Equal(t, expectedWrite, wasWritten)
		ctx.Close()
	}

	ctx := context.NewBackground()
	defer ctx.Close()

	results, err := buffer.ReadEncoded(ctx, 0, timeDistantFuture, namespace.Context{})
	assert.NoError(t, err)
	assert.NotNil(t, results)

	ex := []DecodedTestValue{
		{curr, forceValue, xtime.Second, nil},
	}

	requireReaderValuesEqual(t, ex, results, opts, namespace.Context{})
}

func TestBufferFetchBlocks(t *testing.T) {
	opts := newBufferTestOptions()
	testBufferFetchBlocks(t, opts, nil)
}

func testBufferFetchBlocks(t *testing.T, opts Options, setAnn setAnnotation) {
	b, expected := newTestBufferBucketsWithData(t, opts, setAnn)
	ctx := opts.ContextPool().Get()
	defer ctx.Close()

	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	buffer.bucketsMap[b.start] = b

	nsCtx := namespace.Context{}
	if setAnn != nil {
		nsCtx.Schema = testSchemaDesc
	}
	res := buffer.FetchBlocks(ctx, []xtime.UnixNano{b.start}, nsCtx)
	require.Equal(t, 1, len(res))
	require.Equal(t, b.start, res[0].Start)
	requireReaderValuesEqual(t, expected, [][]xio.BlockReader{res[0].Blocks}, opts, nsCtx)
}

func TestBufferFetchBlocksOneResultPerBlock(t *testing.T) {
	opts := newBufferTestOptions()
	opts = opts.SetColdWritesEnabled(true)
	rOpts := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rOpts.BlockSize())

	// Set up the buffer such that there is a warm and a cold bucket for the
	// same block. After we run FetchBlocks, we should see one result per
	// block, even though there are multiple bucket versions with the same
	// block.
	warmBucket := &BufferBucket{opts: opts}
	warmBucket.resetTo(curr, WarmWrite, opts)
	warmBucket.encoders = nil
	coldBucket := &BufferBucket{opts: opts}
	coldBucket.resetTo(curr, ColdWrite, opts)
	coldBucket.encoders = nil
	buckets := []*BufferBucket{warmBucket, coldBucket}
	warmEncoder := [][]DecodedTestValue{
		{
			{curr, 1, xtime.Second, nil},
			{curr.Add(secs(10)), 2, xtime.Second, nil},
			{curr.Add(secs(50)), 3, xtime.Second, nil},
		},
		{
			{curr.Add(secs(20)), 4, xtime.Second, nil},
			{curr.Add(secs(40)), 5, xtime.Second, nil},
			{curr.Add(secs(60)), 6, xtime.Second, nil},
		},
		{
			{curr.Add(secs(30)), 4, xtime.Second, nil},
			{curr.Add(secs(70)), 5, xtime.Second, nil},
		},
		{
			{curr.Add(secs(35)), 6, xtime.Second, nil},
		},
	}
	coldEncoder := [][]DecodedTestValue{
		{
			{curr.Add(secs(15)), 10, xtime.Second, nil},
			{curr.Add(secs(25)), 20, xtime.Second, nil},
			{curr.Add(secs(40)), 30, xtime.Second, nil},
		},
	}
	data := [][][]DecodedTestValue{warmEncoder, coldEncoder}

	for i, bucket := range data {
		for _, d := range bucket {
			encoded := 0
			encoder := opts.EncoderPool().Get()
			encoder.Reset(curr, 0, nil)
			for _, v := range d {
				dp := ts.Datapoint{
					TimestampNanos: v.Timestamp,
					Value:          v.Value,
				}
				err := encoder.Encode(dp, v.Unit, v.Annotation)
				require.NoError(t, err)
				encoded++
			}
			buckets[i].encoders = append(buckets[i].encoders, inOrderEncoder{encoder: encoder})
		}
	}

	b := &BufferBucketVersions{
		buckets: buckets,
	}
	ctx := opts.ContextPool().Get()
	defer ctx.Close()

	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	buffer.bucketsMap[b.start] = b

	res := buffer.FetchBlocks(ctx, []xtime.UnixNano{
		b.start,
		b.start.Add(time.Second),
	}, namespace.Context{})
	require.Equal(t, 1, len(res))
	require.Equal(t, b.start, res[0].Start)
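	// The single result carries all five streams: four warm encoders plus
	// one cold encoder.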
	require.Equal(t, 5, len(res[0].Blocks))
}

func TestBufferFetchBlocksMetadata(t *testing.T) {
	opts := newBufferTestOptions()

	b, _ := newTestBufferBucketsWithData(t, opts, nil)

	expectedLastRead := xtime.Now()
	b.lastReadUnixNanos = int64(expectedLastRead)
	ctx := opts.ContextPool().Get()
	defer ctx.Close()

	start := b.start.Add(-time.Second)
	end := b.start.Add(time.Second)

	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})
	buffer.bucketsMap[b.start] = b
	buffer.inOrderBlockStarts = append(buffer.inOrderBlockStarts, b.start)

	expectedSize := int64(b.streamsLen())

	fetchOpts := FetchBlocksMetadataOptions{
		FetchBlocksMetadataOptions: block.FetchBlocksMetadataOptions{
			IncludeSizes:     true,
			IncludeChecksums: true,
			IncludeLastRead:  true,
		},
	}
	metadata, err := buffer.FetchBlocksMetadata(ctx, start, end, fetchOpts)
	require.NoError(t, err)
	res := metadata.Results()
	require.Equal(t, 1, len(res))
	require.Equal(t, b.start, res[0].Start)
	require.Equal(t, expectedSize, res[0].Size)
	// Checksum not available since there are multiple streams.
	require.Equal(t, (*uint32)(nil), res[0].Checksum)
	require.True(t, expectedLastRead.Equal(res[0].LastRead))

	// Tick to merge all of the streams into one.
	buffer.Tick(ShardBlockStateSnapshot{}, namespace.Context{})
	metadata, err = buffer.FetchBlocksMetadata(ctx, start, end, fetchOpts)
	require.NoError(t, err)
	res = metadata.Results()
	require.Equal(t, 1, len(res))
	// Checksum should be available now since there was only one stream.
	require.NotNil(t, res[0].Checksum)
}

func TestBufferTickReordersOutOfOrderBuffers(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ctx := context.NewBackground()
	defer ctx.Close()

	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	start := curr
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	// Perform out-of-order writes that will create two in-order encoders.
	data := []DecodedTestValue{
		{curr, 1, xtime.Second, nil},
		{curr.Add(mins(0.5)), 2, xtime.Second, nil},
		{curr.Add(mins(0.5)).Add(-5 * time.Second), 3, xtime.Second, nil},
		{curr.Add(mins(1.0)), 4, xtime.Second, nil},
		{curr.Add(mins(1.5)), 5, xtime.Second, nil},
		{curr.Add(mins(1.5)).Add(-5 * time.Second), 6, xtime.Second, nil},
	}
	end := data[len(data)-1].Timestamp.Add(time.Nanosecond)

	for _, v := range data {
		curr = v.Timestamp
		verifyWriteToBufferSuccess(t, testID, buffer, v, nil)
	}

	var encoders []encoding.Encoder
	for _, buckets := range buffer.bucketsMap {
		bucket, ok := buckets.writableBucket(WarmWrite)
		require.True(t, ok)
		// Current bucket encoders should all have data in them.
		for j := range bucket.encoders {
			encoder := bucket.encoders[j].encoder

			_, ok := encoder.Stream(ctx)
			require.True(t, ok)

			encoders = append(encoders, encoder)
		}
	}

	assert.Equal(t, 2, len(encoders))

	blockStates := BootstrappedBlockStateSnapshot{
		Snapshot: map[xtime.UnixNano]BlockState{
			start: {
				WarmRetrievable: true,
				ColdVersion:     1,
			},
		},
	}
	shardBlockState := NewShardBlockStateSnapshot(true, blockStates)
	// Perform a tick and ensure it merged the out-of-order blocks.
	r := buffer.Tick(shardBlockState, namespace.Context{})
	assert.Equal(t, 1, r.mergedOutOfOrderBlocks)

	// Check that the values are correct.
	results, err := buffer.ReadEncoded(ctx, start, end, namespace.Context{})
	assert.NoError(t, err)
	expected := make([]DecodedTestValue, len(data))
	copy(expected, data)
	sort.Sort(ValuesByTime(expected))
	requireReaderValuesEqual(t, expected, results, opts, namespace.Context{})

	// Count the encoders again.
	encoders = encoders[:0]
	buckets, ok := buffer.bucketVersionsAt(start)
	require.True(t, ok)
	bucket, ok := buckets.writableBucket(WarmWrite)
	require.True(t, ok)
	// Current bucket encoders should all have data in them.
	for j := range bucket.encoders {
		encoder := bucket.encoders[j].encoder

		_, ok := encoder.Stream(ctx)
		require.True(t, ok)

		encoders = append(encoders, encoder)
	}

	// Ensure a single encoder again.
	assert.Equal(t, 1, len(encoders))
}

func TestBufferRemoveBucket(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newBufferTestOptions()
	rops := opts.RetentionOptions()
	curr := xtime.Now().Truncate(rops.BlockSize())
	start := curr
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer := newDatabaseBuffer().(*dbBuffer)
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	// Perform out-of-order writes that will create two in-order encoders.
	data := []DecodedTestValue{
		{curr, 1, xtime.Second, nil},
		{curr.Add(mins(0.5)), 2, xtime.Second, nil},
		{curr.Add(mins(0.5)).Add(-5 * time.Second), 3, xtime.Second, nil},
		{curr.Add(mins(1.0)), 4, xtime.Second, nil},
		{curr.Add(mins(1.5)), 5, xtime.Second, nil},
		{curr.Add(mins(1.5)).Add(-5 * time.Second), 6, xtime.Second, nil},
	}

	for _, v := range data {
		curr = v.Timestamp
		verifyWriteToBufferSuccess(t, testID, buffer, v, nil)
	}

	buckets, exists := buffer.bucketVersionsAt(start)
	require.True(t, exists)
	bucket, exists := buckets.writableBucket(WarmWrite)
	require.True(t, exists)

	// Simulate that a flush has fully completed on this bucket so that it
	// will get removed from the buffer.
	blockStates := BootstrappedBlockStateSnapshot{
		Snapshot: map[xtime.UnixNano]BlockState{
			start: {
				WarmRetrievable: true,
				ColdVersion:     1,
			},
		},
	}
	shardBlockState := NewShardBlockStateSnapshot(true, blockStates)
	bucket.version = 1

	// False because we just wrote to it.
	assert.False(t, buffer.IsEmpty())
	// Perform a tick to remove the bucket which has been flushed.
	buffer.Tick(shardBlockState, namespace.Context{})
	// True because we just removed the bucket.
	assert.True(t, buffer.IsEmpty())
}

func TestBuffertoStream(t *testing.T) {
	opts := newBufferTestOptions()

	testBuffertoStream(t, opts, nil)
}

func testBuffertoStream(t *testing.T, opts Options, setAnn setAnnotation) {
	b, expected := newTestBufferBucketsWithData(t, opts, setAnn)
	ctx := opts.ContextPool().Get()
	defer ctx.Close()
	nsCtx := namespace.Context{}
	if setAnn != nil {
		nsCtx.Schema = testSchemaDesc
	}

	bucket, exists := b.writableBucket(WarmWrite)
	require.True(t, exists)
	assert.Len(t, bucket.encoders, 4)
	assert.Len(t, bucket.loadedBlocks, 0)

	stream, err := b.mergeToStreams(ctx, streamsOptions{filterWriteType: false, nsCtx: nsCtx})
	require.NoError(t, err)
	requireSegmentValuesEqual(t, expected, stream, opts, nsCtx)
}

// TestBufferSnapshotEmptyEncoder ensures that snapshot behaves correctly even
// if an encoder is present but has no data, which can occur in some
// situations, such as when an initial write fails and leaves behind an empty
// encoder.
func TestBufferSnapshotEmptyEncoder(t *testing.T) {
	testBufferWithEmptyEncoder(t, true)
}

// TestBufferFlushEmptyEncoder ensures that flush behaves correctly even if an
// encoder is present but has no data, which can occur in some situations,
// such as when an initial write fails and leaves behind an empty encoder.
func TestBufferFlushEmptyEncoder(t *testing.T) {
	testBufferWithEmptyEncoder(t, false)
}

func testBufferWithEmptyEncoder(t *testing.T, testSnapshot bool) {
	// Setup.
	var (
		opts      = newBufferTestOptions()
		rops      = opts.RetentionOptions()
		blockSize = rops.BlockSize()
		curr      = xtime.Now().Truncate(blockSize)
		start     = curr
		buffer    = newDatabaseBuffer().(*dbBuffer)
	)
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	// Perform one valid write to set up the state of the buffer.
	ctx := context.NewBackground()
	defer ctx.Close()

	wasWritten, _, err := buffer.Write(ctx, testID,
		curr, 1, xtime.Second, nil, WriteOptions{})
	require.NoError(t, err)
	require.True(t, wasWritten)

	// Verify internal state.
	var encoders []encoding.Encoder
	buckets, ok := buffer.bucketVersionsAt(start)
	require.True(t, ok)
	bucket, ok := buckets.writableBucket(WarmWrite)
	require.True(t, ok)
	for j := range bucket.encoders {
		encoder := bucket.encoders[j].encoder

		_, ok := encoder.Stream(ctx)
		require.True(t, ok)

		// Reset the encoder to simulate the situation in which an encoder is
		// present but empty.
		encoder.Reset(curr, 0, nil)

		encoders = append(encoders, encoder)
	}
	require.Equal(t, 1, len(encoders))

	assertPersistDataFn := func(persist.Metadata, ts.Segment, uint32) error {
		t.Fatal("persist fn should not have been called")
		return nil
	}

	metadata := persist.NewMetadata(doc.Metadata{
		ID: []byte("some-id"),
	})

	if testSnapshot {
		ctx = context.NewBackground()
		defer ctx.Close()

		_, err = buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, namespace.Context{})
		assert.NoError(t, err)
	} else {
		ctx = context.NewBackground()
		defer ctx.Close()
		_, err = buffer.WarmFlush(
			ctx, start, metadata, assertPersistDataFn, namespace.Context{})
		require.NoError(t, err)
	}
}

func TestBufferSnapshot(t *testing.T) {
	opts := newBufferTestOptions()
	testBufferSnapshot(t, opts, nil)
}

func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) {
	// Setup.
	var (
		rops      = opts.RetentionOptions()
		blockSize = rops.BlockSize()
		curr      = xtime.Now().Truncate(blockSize)
		start     = curr
		buffer    = newDatabaseBuffer().(*dbBuffer)
		nsCtx     namespace.Context
	)
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))

	ctx := context.NewBackground()
	defer ctx.Close()

	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	// Create test data to perform out-of-order writes that will create two
	// in-order encoders so we can verify that Snapshot will perform a merge.
	data := []DecodedTestValue{
		{curr, 1, xtime.Second, nil},
		{curr.Add(mins(0.5)), 2, xtime.Second, nil},
		{curr.Add(mins(0.5)).Add(-5 * time.Second), 3, xtime.Second, nil},
		{curr.Add(mins(1.0)), 4, xtime.Second, nil},
		{curr.Add(mins(1.5)), 5, xtime.Second, nil},
		{curr.Add(mins(1.5)).Add(-5 * time.Second), 6, xtime.Second, nil},

		// Add one write for a different block to make sure Snapshot only
		// returns data for the requested block.
		{curr.Add(blockSize), 6, xtime.Second, nil},
	}
	if setAnn != nil {
		data = setAnn(data)
		nsCtx = namespace.Context{Schema: testSchemaDesc}
	}

	// Perform the writes.
	for _, v := range data {
		curr = v.Timestamp
		verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema)
	}

	// Verify internal state.
	var encoders []encoding.Encoder

	buckets, ok := buffer.bucketVersionsAt(start)
	require.True(t, ok)
	bucket, ok := buckets.writableBucket(WarmWrite)
	require.True(t, ok)
	// Current bucket encoders should all have data in them.
	for j := range bucket.encoders {
		encoder := bucket.encoders[j].encoder

		_, ok := encoder.Stream(ctx)
		require.True(t, ok)

		encoders = append(encoders, encoder)
	}

	assert.Equal(t, 2, len(encoders))

	assertPersistDataFn := func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
		// Check we got the right results.
		expectedData := data[:len(data)-1] // -1 because we don't expect the last datapoint.
		expectedCopy := make([]DecodedTestValue, len(expectedData))
		copy(expectedCopy, expectedData)
		sort.Sort(ValuesByTime(expectedCopy))
		actual := [][]xio.BlockReader{{
			xio.BlockReader{
				SegmentReader: xio.NewSegmentReader(segment),
			},
		}}
		requireReaderValuesEqual(t, expectedCopy, actual, opts, nsCtx)

		return nil
	}

	// Perform a snapshot.
	metadata := persist.NewMetadata(doc.Metadata{
		ID: []byte("some-id"),
	})

	_, err := buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, nsCtx)
	assert.NoError(t, err)

	// Check internal state to make sure the merge happened and was persisted.
	encoders = encoders[:0]
	buckets, ok = buffer.bucketVersionsAt(start)
	require.True(t, ok)
	bucket, ok = buckets.writableBucket(WarmWrite)
	require.True(t, ok)
	// Current bucket encoders should all have data in them.
	for i := range bucket.encoders {
		encoder := bucket.encoders[i].encoder

		_, ok := encoder.Stream(ctx)
		require.True(t, ok)

		encoders = append(encoders, encoder)
	}

	// Ensure a single encoder again.
	assert.Equal(t, 1, len(encoders))
}

func TestBufferSnapshotWithColdWrites(t *testing.T) {
	opts := newBufferTestOptions().SetColdWritesEnabled(true)

	var (
		rops      = opts.RetentionOptions()
		blockSize = rops.BlockSize()
		curr      = xtime.Now().Truncate(blockSize)
		start     = curr
		buffer    = newDatabaseBuffer().(*dbBuffer)
		nsCtx     namespace.Context
	)
	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
		return curr.ToTime()
	}))
	buffer.Reset(databaseBufferResetOptions{
		Options: opts,
	})

	// Create test data to perform warm writes that will create two in-order
	// encoders so we can verify that Snapshot will perform a merge.
	warmData := []DecodedTestValue{
		{curr, 1, xtime.Second, nil},
		{curr.Add(mins(0.5)), 2, xtime.Second, nil},
		{curr.Add(mins(0.5)).Add(-5 * time.Second), 3, xtime.Second, nil},
		{curr.Add(mins(1.0)), 4, xtime.Second, nil},
		{curr.Add(mins(1.5)), 5, xtime.Second, nil},
		{curr.Add(mins(1.5)).Add(-5 * time.Second), 6, xtime.Second, nil},

		// Add one write for a different block to make sure Snapshot only
		// returns data for the requested block.
		{curr.Add(blockSize), 6, xtime.Second, nil},
	}

	// Perform warm writes.
	for _, v := range warmData {
		// Set curr so that every write is a warm write.
		curr = v.Timestamp
		verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema)
	}

	// Also add cold writes to the buffer to verify that Snapshot will capture
	// cold writes as well and perform a merge across both warm and cold data.
	// The cold data itself is not in order, so we expect to have two in-order
	// encoders for these.
	curr = start.Add(mins(1.5))
	// In order for these writes to actually be cold, they all need to have
	// timestamps before `curr.Add(-rops.BufferPast())`. Take care to not use
	// the same timestamps used in the warm writes above; otherwise these will
	// overwrite them.
	// Buffer past/future in this test case is 10 seconds.
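	// Concretely, curr is now start+90s and buffer past is 10s, so anything
	// before start+80s counts as a cold write; the writes below at
	// start+2s..start+6s are all comfortably cold.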
	coldData := []DecodedTestValue{
		{start.Add(secs(2)), 11, xtime.Second, nil},
		{start.Add(secs(4)), 12, xtime.Second, nil},
		{start.Add(secs(6)), 13, xtime.Second, nil},
		{start.Add(secs(3)), 14, xtime.Second, nil},
		{start.Add(secs(5)), 15, xtime.Second, nil},
	}

	// Perform cold writes.
	for _, v := range coldData {
		verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema)
	}

	// Verify internal state.
	var (
		warmEncoders []encoding.Encoder
		coldEncoders []encoding.Encoder
	)
	ctx := context.NewBackground()
	defer ctx.Close()

	buckets, ok := buffer.bucketVersionsAt(start)
	require.True(t, ok)

	bucket, ok := buckets.writableBucket(WarmWrite)
	require.True(t, ok)
	// Warm bucket encoders should all have data in them.
	for j := range bucket.encoders {
		encoder := bucket.encoders[j].encoder

		_, ok := encoder.Stream(ctx)
		require.True(t, ok)

		warmEncoders = append(warmEncoders, encoder)
	}
	assert.Equal(t, 2, len(warmEncoders))

	bucket, ok = buckets.writableBucket(ColdWrite)
	require.True(t, ok)
	// Cold bucket encoders should all have data in them.
	for j := range bucket.encoders {
		encoder := bucket.encoders[j].encoder

		_, ok := encoder.Stream(ctx)
		require.True(t, ok)

		coldEncoders = append(coldEncoders, encoder)
	}
	assert.Equal(t, 2, len(coldEncoders))

	assertPersistDataFn := func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
		// Check we got the right results.
		// `len(warmData)-1` because we don't expect the last warm datapoint
		// since it's for a different block.
		expectedData := warmData[:len(warmData)-1]
		expectedData = append(expectedData, coldData...)
		expectedCopy := make([]DecodedTestValue, len(expectedData))
		copy(expectedCopy, expectedData)
		sort.Sort(ValuesByTime(expectedCopy))
		actual := [][]xio.BlockReader{{
			xio.BlockReader{
				SegmentReader: xio.NewSegmentReader(segment),
			},
		}}
		requireReaderValuesEqual(t, expectedCopy, actual, opts, nsCtx)

		return nil
	}

	// Perform a snapshot.
	metadata := persist.NewMetadata(doc.Metadata{
		ID: []byte("some-id"),
	})

	_, err := buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, nsCtx)
	require.NoError(t, err)

	// Check internal state of the warm bucket to make sure the merge happened
	// and was persisted.
	warmEncoders = warmEncoders[:0]
	buckets, ok = buffer.bucketVersionsAt(start)
	require.True(t, ok)
	bucket, ok = buckets.writableBucket(WarmWrite)
	require.True(t, ok)
	// Current bucket encoders should all have data in them.
	for i := range bucket.encoders {
		encoder := bucket.encoders[i].encoder

		_, ok := encoder.Stream(ctx)
		require.True(t, ok)

		warmEncoders = append(warmEncoders, encoder)
	}
	// Ensure a single encoder again.
	assert.Equal(t, 1, len(warmEncoders))

	// Check internal state of the cold bucket to make sure the merge happened
	// and was persisted.
	coldEncoders = coldEncoders[:0]
	buckets, ok = buffer.bucketVersionsAt(start)
	require.True(t, ok)
	bucket, ok = buckets.writableBucket(ColdWrite)
	require.True(t, ok)
	// Current bucket encoders should all have data in them.
	for i := range bucket.encoders {
		encoder := bucket.encoders[i].encoder

		_, ok := encoder.Stream(ctx)
		require.True(t, ok)

		coldEncoders = append(coldEncoders, encoder)
	}
	// Ensure a single encoder again.
	assert.Equal(t, 1, len(coldEncoders))
}

func mustGetLastEncoded(t *testing.T, entry inOrderEncoder) ts.Datapoint {
	last, err := entry.encoder.LastEncoded()
	require.NoError(t, err)
	return last
}

  1464  func TestInOrderUnixNanosAddRemove(t *testing.T) {
  1465  	buffer := newDatabaseBuffer().(*dbBuffer)
  1466  	assertTimeSlicesEqual(t, []xtime.UnixNano{}, buffer.inOrderBlockStarts)
  1467  
  1468  	t3 := xtime.FromSeconds(3)
  1469  	t5 := xtime.FromSeconds(5)
  1470  	t7 := xtime.FromSeconds(7)
  1471  	t8 := xtime.FromSeconds(8)
  1472  
  1473  	buffer.inOrderBlockStartsAdd(t5)
  1474  	assertTimeSlicesEqual(t, []xtime.UnixNano{t5}, buffer.inOrderBlockStarts)
  1475  
  1476  	buffer.inOrderBlockStartsAdd(t3)
  1477  	assertTimeSlicesEqual(t, []xtime.UnixNano{t3, t5}, buffer.inOrderBlockStarts)
  1478  
  1479  	buffer.inOrderBlockStartsAdd(t8)
  1480  	assertTimeSlicesEqual(t, []xtime.UnixNano{t3, t5, t8}, buffer.inOrderBlockStarts)
  1481  
  1482  	buffer.inOrderBlockStartsAdd(t7)
  1483  	assertTimeSlicesEqual(t, []xtime.UnixNano{t3, t5, t7, t8}, buffer.inOrderBlockStarts)
  1484  
  1485  	buffer.inOrderBlockStartsRemove(t5)
  1486  	assertTimeSlicesEqual(t, []xtime.UnixNano{t3, t7, t8}, buffer.inOrderBlockStarts)
  1487  
  1488  	buffer.inOrderBlockStartsRemove(t3)
  1489  	assertTimeSlicesEqual(t, []xtime.UnixNano{t7, t8}, buffer.inOrderBlockStarts)
  1490  
  1491  	buffer.inOrderBlockStartsRemove(t8)
  1492  	assertTimeSlicesEqual(t, []xtime.UnixNano{t7}, buffer.inOrderBlockStarts)
  1493  
  1494  	buffer.inOrderBlockStartsRemove(t7)
  1495  	assertTimeSlicesEqual(t, []xtime.UnixNano{}, buffer.inOrderBlockStarts)
  1496  }
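
// The test above depends on inOrderBlockStartsAdd and inOrderBlockStartsRemove
// keeping the slice sorted regardless of insertion order. A minimal sketch of
// how such a sorted insert and remove can be done with sort.Search;
// exampleSortedInsert and exampleSortedRemove are illustrative helpers, not
// the production implementation.
func exampleSortedInsert(times []xtime.UnixNano, t xtime.UnixNano) []xtime.UnixNano {
	// Find the first index whose value is >= t.
	i := sort.Search(len(times), func(j int) bool { return times[j] >= t })
	times = append(times, 0)     // Grow by one slot.
	copy(times[i+1:], times[i:]) // Shift the tail right.
	times[i] = t
	return times
}

func exampleSortedRemove(times []xtime.UnixNano, t xtime.UnixNano) []xtime.UnixNano {
	i := sort.Search(len(times), func(j int) bool { return times[j] >= t })
	if i < len(times) && times[i] == t {
		times = append(times[:i], times[i+1:]...)
	}
	return times
}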
  1497  
  1498  func assertTimeSlicesEqual(t *testing.T, t1, t2 []xtime.UnixNano) {
  1499  	require.Equal(t, len(t1), len(t2))
  1500  	for i := range t1 {
  1501  		assert.Equal(t, t1[i], t2[i])
  1502  	}
  1503  }
  1504  
  1505  func TestOptimizedTimes(t *testing.T) {
  1506  	var times OptimizedTimes
  1507  	assert.Equal(t, 0, cap(times.slice))
  1508  	assert.Equal(t, 0, times.Len())
  1509  	assert.False(t, times.Contains(xtime.UnixNano(0)))
  1510  
  1511  	var expectedTimes []xtime.UnixNano
  1512  	var forEachTimes []xtime.UnixNano
	// ForEach should only call the provided func for times that were actually
	// added. OptimizedTimes contains a fixed-size xtime.UnixNano array
	// internally, and the func must not run for the array's zero values unless
	// those zero times were explicitly added.
  1517  	times.ForEach(func(tNano xtime.UnixNano) {
  1518  		forEachTimes = append(forEachTimes, tNano)
  1519  	})
  1520  	assertEqualUnixSlices(t, expectedTimes, forEachTimes)
  1521  
  1522  	expectedTimes = expectedTimes[:0]
  1523  	forEachTimes = forEachTimes[:0]
  1524  
  1525  	// These adds should only go in the array.
  1526  	for i := 0; i < optimizedTimesArraySize; i++ {
  1527  		tNano := xtime.UnixNano(i)
  1528  		times.Add(tNano)
  1529  		expectedTimes = append(expectedTimes, tNano)
  1530  
  1531  		assert.Equal(t, 0, cap(times.slice))
  1532  		assert.Equal(t, i+1, times.arrIdx)
  1533  		assert.Equal(t, i+1, times.Len())
  1534  		assert.True(t, times.Contains(tNano))
  1535  	}
  1536  
  1537  	numExtra := 5
	// These adds no longer fit in the array, so they go to the slice.
  1539  	for i := optimizedTimesArraySize; i < optimizedTimesArraySize+numExtra; i++ {
  1540  		tNano := xtime.UnixNano(i)
  1541  		times.Add(tNano)
  1542  		expectedTimes = append(expectedTimes, tNano)
  1543  
  1544  		assert.Equal(t, optimizedTimesArraySize, times.arrIdx)
  1545  		assert.Equal(t, i+1, times.Len())
  1546  		assert.True(t, times.Contains(tNano))
  1547  	}
  1548  
  1549  	times.ForEach(func(tNano xtime.UnixNano) {
  1550  		forEachTimes = append(forEachTimes, tNano)
  1551  	})
  1552  
  1553  	assertEqualUnixSlices(t, expectedTimes, forEachTimes)
  1554  }
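
// TestOptimizedTimes above asserts on cap(times.slice) and times.arrIdx
// because OptimizedTimes avoids allocating in the common case: the first
// optimizedTimesArraySize entries live in a fixed-size array and only further
// entries spill into a slice. A rough sketch of that hybrid layout, assuming
// a small fixed array; exampleTimes is illustrative only:
type exampleTimes struct {
	arr    [4]xtime.UnixNano // Fixed-size storage for the common case.
	arrIdx int               // Number of array slots in use.
	slice  []xtime.UnixNano  // Overflow storage, allocated only when needed.
}

func (e *exampleTimes) Add(t xtime.UnixNano) {
	if e.arrIdx < len(e.arr) {
		e.arr[e.arrIdx] = t
		e.arrIdx++
		return
	}
	e.slice = append(e.slice, t)
}

func (e *exampleTimes) ForEach(fn func(xtime.UnixNano)) {
	// Visit only array slots that were explicitly added so the zero values
	// in the array's unused tail are never passed to fn.
	for i := 0; i < e.arrIdx; i++ {
		fn(e.arr[i])
	}
	for _, t := range e.slice {
		fn(t)
	}
}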
  1555  
  1556  func assertEqualUnixSlices(t *testing.T, expected, actual []xtime.UnixNano) {
  1557  	require.Equal(t, len(expected), len(actual))
  1558  	for i := range expected {
  1559  		assert.Equal(t, expected[i], actual[i])
  1560  	}
  1561  }
  1562  
  1563  func TestColdFlushBlockStarts(t *testing.T) {
  1564  	opts := newBufferTestOptions()
  1565  	rops := opts.RetentionOptions()
  1566  	blockSize := rops.BlockSize()
  1567  	blockStart4 := xtime.Now().Truncate(blockSize)
  1568  	blockStart3 := blockStart4.Add(-2 * blockSize)
  1569  	blockStart2 := blockStart4.Add(-3 * blockSize)
  1570  	blockStart1 := blockStart4.Add(-4 * blockSize)
  1571  
  1572  	bds := []blockData{
  1573  		{
  1574  			start:     blockStart1,
  1575  			writeType: ColdWrite,
  1576  			data: [][]DecodedTestValue{
  1577  				{
  1578  					{blockStart1, 1, xtime.Second, nil},
  1579  					{blockStart1.Add(secs(5)), 2, xtime.Second, nil},
  1580  					{blockStart1.Add(secs(10)), 3, xtime.Second, nil},
  1581  				},
  1582  			},
  1583  		},
  1584  		{
  1585  			start:     blockStart2,
  1586  			writeType: ColdWrite,
  1587  			data: [][]DecodedTestValue{
  1588  				{
  1589  					{blockStart2.Add(secs(2)), 4, xtime.Second, nil},
  1590  					{blockStart2.Add(secs(5)), 5, xtime.Second, nil},
  1591  					{blockStart2.Add(secs(11)), 6, xtime.Second, nil},
  1592  					{blockStart2.Add(secs(15)), 7, xtime.Second, nil},
  1593  					{blockStart2.Add(secs(40)), 8, xtime.Second, nil},
  1594  				},
  1595  			},
  1596  		},
  1597  		{
  1598  			start:     blockStart3,
  1599  			writeType: ColdWrite,
  1600  			data: [][]DecodedTestValue{
  1601  				{
  1602  					{blockStart3.Add(secs(71)), 9, xtime.Second, nil},
  1603  				},
  1604  			},
  1605  		},
  1606  		{
  1607  			start:     blockStart4,
  1608  			writeType: WarmWrite,
  1609  			data: [][]DecodedTestValue{
  1610  				{
  1611  					{blockStart4.Add(secs(57)), 10, xtime.Second, nil},
  1612  					{blockStart4.Add(secs(66)), 11, xtime.Second, nil},
  1613  					{blockStart4.Add(secs(80)), 12, xtime.Second, nil},
  1614  					{blockStart4.Add(secs(81)), 13, xtime.Second, nil},
  1615  					{blockStart4.Add(secs(82)), 14, xtime.Second, nil},
  1616  					{blockStart4.Add(secs(96)), 15, xtime.Second, nil},
  1617  				},
  1618  			},
  1619  		},
  1620  	}
  1621  
  1622  	buffer, _ := newTestBufferWithCustomData(t, bds, opts, nil)
  1623  	blockStates := make(map[xtime.UnixNano]BlockState)
  1624  	blockStates[blockStart1] = BlockState{
  1625  		WarmRetrievable: true,
  1626  		ColdVersion:     0,
  1627  	}
  1628  	blockStates[blockStart2] = BlockState{
  1629  		WarmRetrievable: true,
  1630  		ColdVersion:     0,
  1631  	}
  1632  	blockStates[blockStart3] = BlockState{
  1633  		WarmRetrievable: true,
  1634  		ColdVersion:     0,
  1635  	}
  1636  	flushStarts := buffer.ColdFlushBlockStarts(blockStates)
  1637  
  1638  	// All three cold blocks should report that they are dirty.
  1639  	assert.Equal(t, 3, flushStarts.Len())
  1640  	assert.True(t, flushStarts.Contains(blockStart1))
  1641  	assert.True(t, flushStarts.Contains(blockStart2))
  1642  	assert.True(t, flushStarts.Contains(blockStart3))
  1643  
  1644  	// Simulate that block2 and block3 are flushed (but not yet evicted from
  1645  	// memory), so only block1 should report as dirty.
  1646  	buffer.bucketsMap[blockStart2].buckets[0].version = 1
  1647  	buffer.bucketsMap[blockStart3].buckets[0].version = 1
  1648  	blockStates[blockStart2] = BlockState{
  1649  		WarmRetrievable: true,
  1650  		ColdVersion:     1,
  1651  	}
  1652  	blockStates[blockStart3] = BlockState{
  1653  		WarmRetrievable: true,
  1654  		ColdVersion:     1,
  1655  	}
  1656  
  1657  	flushStarts = buffer.ColdFlushBlockStarts(blockStates)
  1658  	assert.Equal(t, 1, flushStarts.Len())
  1659  	assert.True(t, flushStarts.Contains(blockStart1))
  1660  
	// Simulate that blockStart3 did not get fully flushed, so it should be
	// flushed again.
  1663  	blockStates[blockStart3] = BlockState{
  1664  		WarmRetrievable: true,
  1665  		ColdVersion:     0,
  1666  	}
  1667  	flushStarts = buffer.ColdFlushBlockStarts(blockStates)
  1668  	assert.Equal(t, 2, flushStarts.Len())
  1669  	assert.True(t, flushStarts.Contains(blockStart1))
  1670  	assert.True(t, flushStarts.Contains(blockStart3))
  1671  }
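
// The assertions above pin down the dirtiness rule this test expects: a cold
// bucket still needs flushing when it has never been fetched for a cold flush
// (version 0), or when it was fetched at a version newer than the ColdVersion
// the block state records as durably flushed. A sketch of that predicate;
// exampleIsDirty is an illustrative helper, not the production check:
func exampleIsDirty(bucketVersion int, state BlockState) bool {
	if bucketVersion == 0 {
		// Never fetched for a cold flush: cold data is unpersisted.
		return true
	}
	// Fetched at a newer version than was confirmed flushed: the previous
	// flush attempt did not complete, so the block must flush again.
	return bucketVersion > state.ColdVersion
}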
  1672  
  1673  func TestFetchBlocksForColdFlush(t *testing.T) {
  1674  	now := xtime.Now()
  1675  	opts := newBufferTestOptions().SetColdWritesEnabled(true)
  1676  	opts = opts.SetClockOptions(
  1677  		opts.ClockOptions().SetNowFn(func() time.Time {
  1678  			return now.ToTime()
  1679  		}),
  1680  	)
  1681  	rops := opts.RetentionOptions()
  1682  	blockSize := rops.BlockSize()
	blockStart4 := now.Truncate(blockSize)
  1684  	blockStart3 := blockStart4.Add(-2 * blockSize)
  1685  	blockStart2 := blockStart4.Add(-3 * blockSize)
  1686  	blockStart1 := blockStart4.Add(-4 * blockSize)
  1687  
  1688  	bds := []blockData{
  1689  		{
  1690  			start:     blockStart1,
  1691  			writeType: ColdWrite,
  1692  			data: [][]DecodedTestValue{
  1693  				{
  1694  					{blockStart1, 1, xtime.Second, nil},
  1695  					{blockStart1.Add(secs(5)), 2, xtime.Second, nil},
  1696  					{blockStart1.Add(secs(10)), 3, xtime.Second, nil},
  1697  				},
  1698  			},
  1699  		},
  1700  		{
  1701  			start:     blockStart3,
  1702  			writeType: ColdWrite,
  1703  			data: [][]DecodedTestValue{
  1704  				{
  1705  					{blockStart3.Add(secs(71)), 9, xtime.Second, nil},
  1706  				},
  1707  			},
  1708  		},
  1709  		{
  1710  			start:     blockStart4,
  1711  			writeType: WarmWrite,
  1712  			data: [][]DecodedTestValue{
  1713  				{
  1714  					{blockStart4.Add(secs(57)), 10, xtime.Second, nil},
  1715  					{blockStart4.Add(secs(66)), 11, xtime.Second, nil},
  1716  					{blockStart4.Add(secs(80)), 12, xtime.Second, nil},
  1717  					{blockStart4.Add(secs(81)), 13, xtime.Second, nil},
  1718  					{blockStart4.Add(secs(82)), 14, xtime.Second, nil},
  1719  					{blockStart4.Add(secs(96)), 15, xtime.Second, nil},
  1720  				},
  1721  			},
  1722  		},
  1723  	}
  1724  
  1725  	buffer, expected := newTestBufferWithCustomData(t, bds, opts, nil)
  1726  	ctx := context.NewBackground()
  1727  	defer ctx.Close()
  1728  	nsCtx := namespace.Context{Schema: testSchemaDesc}
  1729  	result, err := buffer.FetchBlocksForColdFlush(ctx, blockStart1, 4, nsCtx)
  1730  	assert.NoError(t, err)
	// Verify that we got the correct data and that the version is correctly set.
  1732  	requireReaderValuesEqual(t, expected[blockStart1], [][]xio.BlockReader{result.Blocks}, opts, nsCtx)
  1733  	assert.Equal(t, 4, buffer.bucketsMap[blockStart1].buckets[0].version)
  1734  	assert.Equal(t, now, result.FirstWrite)
  1735  
	// Try to fetch from block1 again. This should not be an error, since we
	// may need to re-fetch blocks whose buckets failed to flush fully on a
	// previous attempt.
  1739  	result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart1, 9, nsCtx)
  1740  	assert.NoError(t, err)
  1741  	assert.Equal(t, now, result.FirstWrite)
  1742  
  1743  	// Verify that writing to a cold block updates the first write time. No data in blockStart2 yet.
  1744  	result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart2, 1, nsCtx)
  1745  	assert.NoError(t, err)
  1746  	requireReaderValuesEqual(t, []DecodedTestValue{}, [][]xio.BlockReader{result.Blocks}, opts, nsCtx)
  1747  	assert.Equal(t, 0, int(result.FirstWrite))
	wasWritten, _, err := buffer.Write(ctx, testID, blockStart2, 1,
		xtime.Second, nil, WriteOptions{})
	assert.NoError(t, err)
	assert.True(t, wasWritten)
  1751  	result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart2, 1, nsCtx)
  1752  	assert.NoError(t, err)
  1753  	assert.Equal(t, now, result.FirstWrite)
  1754  
  1755  	result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart3, 1, nsCtx)
  1756  	assert.NoError(t, err)
  1757  	requireReaderValuesEqual(t, expected[blockStart3], [][]xio.BlockReader{result.Blocks}, opts, nsCtx)
  1758  	assert.Equal(t, 1, buffer.bucketsMap[blockStart3].buckets[0].version)
  1759  	assert.Equal(t, now, result.FirstWrite)
  1760  
	// Try to fetch from a block that only has warm buckets. This returns no
	// data but is not an error.
  1763  	result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart4, 1, nsCtx)
  1764  	assert.NoError(t, err)
  1765  	requireReaderValuesEqual(t, []DecodedTestValue{}, [][]xio.BlockReader{result.Blocks}, opts, nsCtx)
  1766  	assert.Equal(t, 0, int(result.FirstWrite))
  1767  }
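
// The FirstWrite assertions above reflect that a bucket records the
// wall-clock time at which it received its first write, and that a block
// with no cold data reports a zero FirstWrite. A sketch of that bookkeeping,
// assuming a clock like the test's SetNowFn; exampleColdBucket is
// illustrative only:
type exampleColdBucket struct {
	firstWrite xtime.UnixNano // Zero until the first datapoint arrives.
}

func (b *exampleColdBucket) write(now xtime.UnixNano) {
	if b.firstWrite == 0 {
		// Stamp the time of the first write; later writes to this bucket
		// leave the stamp unchanged.
		b.firstWrite = now
	}
}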
  1768  
  1769  // TestBufferLoadWarmWrite tests the Load method, ensuring that blocks are successfully loaded into
  1770  // the buffer and treated as warm writes.
  1771  func TestBufferLoadWarmWrite(t *testing.T) {
  1772  	var (
  1773  		opts      = newBufferTestOptions()
  1774  		buffer    = newDatabaseBuffer()
  1775  		blockSize = opts.RetentionOptions().BlockSize()
  1776  		curr      = xtime.Now().Truncate(blockSize)
  1777  		nsCtx     = namespace.Context{}
  1778  	)
  1779  	buffer.Reset(databaseBufferResetOptions{
  1780  		Options: opts,
  1781  	})
  1782  	encoded, err := buffer.ReadEncoded(context.NewBackground(), curr, curr.Add(blockSize), nsCtx)
  1783  	require.NoError(t, err)
  1784  	require.Equal(t, 0, len(encoded))
  1785  
  1786  	data := checked.NewBytes([]byte("some-data"), nil)
  1787  	data.IncRef()
  1788  	segment := ts.Segment{Head: data}
  1789  	block := block.NewDatabaseBlock(curr, blockSize, segment, opts.DatabaseBlockOptions(), nsCtx)
  1790  	buffer.Load(block, WarmWrite)
  1791  
  1792  	// Ensure the bootstrapped block is loaded and readable.
  1793  	encoded, err = buffer.ReadEncoded(context.NewBackground(), curr, curr.Add(blockSize), nsCtx)
  1794  	require.NoError(t, err)
  1795  	require.Equal(t, 1, len(encoded))
  1796  
  1797  	// Ensure bootstrapped blocks are loaded as warm writes.
  1798  	coldFlushBlockStarts := buffer.ColdFlushBlockStarts(nil)
  1799  	require.Equal(t, 0, coldFlushBlockStarts.Len())
  1800  }
  1801  
  1802  // TestBufferLoadColdWrite tests the Load method, ensuring that blocks are successfully loaded into
  1803  // the buffer and treated as cold writes.
  1804  func TestBufferLoadColdWrite(t *testing.T) {
  1805  	var (
  1806  		opts      = newBufferTestOptions()
  1807  		buffer    = newDatabaseBuffer()
  1808  		blockSize = opts.RetentionOptions().BlockSize()
  1809  		curr      = xtime.Now().Truncate(blockSize)
  1810  		nsCtx     = namespace.Context{}
  1811  	)
  1812  	buffer.Reset(databaseBufferResetOptions{
  1813  		Options: opts,
  1814  	})
  1815  	encoded, err := buffer.ReadEncoded(context.NewBackground(), curr, curr.Add(blockSize), nsCtx)
  1816  	require.NoError(t, err)
  1817  	require.Equal(t, 0, len(encoded))
  1818  
  1819  	data := checked.NewBytes([]byte("some-data"), nil)
  1820  	data.IncRef()
  1821  	segment := ts.Segment{Head: data}
  1822  	block := block.NewDatabaseBlock(curr, blockSize, segment, opts.DatabaseBlockOptions(), nsCtx)
  1823  	buffer.Load(block, ColdWrite)
  1824  
  1825  	// Ensure the bootstrapped block is loaded and readable.
  1826  	encoded, err = buffer.ReadEncoded(context.NewBackground(), curr, curr.Add(blockSize), nsCtx)
  1827  	require.NoError(t, err)
  1828  	require.Equal(t, 1, len(encoded))
  1829  
  1830  	// Ensure bootstrapped blocks are loaded as cold writes.
  1831  	coldFlushBlockStarts := buffer.ColdFlushBlockStarts(nil)
  1832  	require.Equal(t, 1, coldFlushBlockStarts.Len())
  1833  }
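
// Taken together, the two Load tests above pin down that Load routes the
// loaded block into a bucket of the given write type, and that only cold
// buckets surface through ColdFlushBlockStarts. A sketch of that routing
// decision; exampleTracksForColdFlush is illustrative only:
func exampleTracksForColdFlush(wType WriteType) bool {
	// Warm loads are covered by warm flushes and snapshots; only cold
	// writes participate in cold flush bookkeeping.
	return wType == ColdWrite
}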
  1834  
  1835  func TestUpsertProto(t *testing.T) {
  1836  	opts := newBufferTestOptions()
  1837  	rops := opts.RetentionOptions()
  1838  	curr := xtime.Now().Truncate(rops.BlockSize())
  1839  	opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
  1840  		return curr.ToTime()
  1841  	}))
  1842  	var nsCtx namespace.Context
  1843  
  1844  	tests := []struct {
  1845  		desc         string
  1846  		writes       []writeAttempt
  1847  		expectedData []DecodedTestValue
  1848  	}{
  1849  		{
  1850  			desc: "Upsert proto",
  1851  			writes: []writeAttempt{
  1852  				{
  1853  					data:          DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
  1854  					expectWritten: true,
  1855  					expectErr:     false,
  1856  				},
  1857  				{
  1858  					data:          DecodedTestValue{curr, 0, xtime.Second, []byte("two")},
  1859  					expectWritten: true,
  1860  					expectErr:     false,
  1861  				},
  1862  			},
  1863  			expectedData: []DecodedTestValue{
  1864  				{curr, 0, xtime.Second, []byte("two")},
  1865  			},
  1866  		},
  1867  		{
  1868  			desc: "Duplicate proto",
  1869  			writes: []writeAttempt{
  1870  				{
  1871  					data:          DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
  1872  					expectWritten: true,
  1873  					expectErr:     false,
  1874  				},
  1875  				{
  1876  					data: DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
  1877  					// Writes with the same value and the same annotation should
  1878  					// not be written.
  1879  					expectWritten: false,
  1880  					expectErr:     false,
  1881  				},
  1882  			},
  1883  			expectedData: []DecodedTestValue{
  1884  				{curr, 0, xtime.Second, []byte("one")},
  1885  			},
  1886  		},
  1887  		{
  1888  			desc: "Two datapoints different proto",
  1889  			writes: []writeAttempt{
  1890  				{
  1891  					data:          DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
  1892  					expectWritten: true,
  1893  					expectErr:     false,
  1894  				},
  1895  				{
  1896  					data:          DecodedTestValue{curr.Add(time.Second), 0, xtime.Second, []byte("two")},
  1897  					expectWritten: true,
  1898  					expectErr:     false,
  1899  				},
  1900  			},
  1901  			expectedData: []DecodedTestValue{
  1902  				{curr, 0, xtime.Second, []byte("one")},
  1903  				{curr.Add(time.Second), 0, xtime.Second, []byte("two")},
  1904  			},
  1905  		},
  1906  		{
  1907  			desc: "Two datapoints same proto",
  1908  			writes: []writeAttempt{
  1909  				{
  1910  					data:          DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
  1911  					expectWritten: true,
  1912  					expectErr:     false,
  1913  				},
  1914  				{
  1915  					data:          DecodedTestValue{curr.Add(time.Second), 0, xtime.Second, []byte("one")},
  1916  					expectWritten: true,
  1917  					expectErr:     false,
  1918  				},
  1919  			},
  1920  			expectedData: []DecodedTestValue{
  1921  				{curr, 0, xtime.Second, []byte("one")},
				// This is special cased in the proto encoder. It has logic
				// handling the case where two consecutive values are the
				// same: it encodes that nothing has changed instead of
				// re-encoding the blob again.
  1926  				{curr.Add(time.Second), 0, xtime.Second, nil},
  1927  			},
  1928  		},
  1929  	}
  1930  
  1931  	for _, test := range tests {
  1932  		t.Run(test.desc, func(t *testing.T) {
  1933  			buffer := newDatabaseBuffer().(*dbBuffer)
  1934  			buffer.Reset(databaseBufferResetOptions{
  1935  				Options: opts,
  1936  			})
  1937  
  1938  			for _, write := range test.writes {
  1939  				verifyWriteToBuffer(t, testID, buffer, write.data, nsCtx.Schema,
  1940  					write.expectWritten, write.expectErr)
  1941  			}
  1942  
  1943  			ctx := context.NewBackground()
  1944  			defer ctx.Close()
  1945  
  1946  			results, err := buffer.ReadEncoded(ctx, 0, timeDistantFuture, nsCtx)
  1947  			assert.NoError(t, err)
  1948  			assert.NotNil(t, results)
  1949  
  1950  			requireReaderValuesEqual(t, test.expectedData, results, opts, nsCtx)
  1951  		})
  1952  	}
  1953  }
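
// The cases above encode the upsert rule for proto values: a write at a
// timestamp that already holds a datapoint replaces it, unless both the value
// and the annotation are unchanged, in which case the write is dropped as a
// pure duplicate. A sketch of that decision; exampleShouldWrite is an
// illustrative helper, not the buffer's actual write path:
func exampleShouldWrite(existing, incoming DecodedTestValue) bool {
	sameValue := existing.Value == incoming.Value
	sameAnnotation := string(existing.Annotation) == string(incoming.Annotation)
	// An identical value with an identical annotation at the same timestamp
	// is a duplicate and should not be written.
	return !(sameValue && sameAnnotation)
}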
  1954  
  1955  func TestMarkNonEmptyBlocks(t *testing.T) {
  1956  	var (
  1957  		now  = xtime.Now()
  1958  		opts = newBufferTestOptions().
  1959  			SetColdWritesEnabled(true).
  1960  			SetClockOptions(clock.NewOptions().SetNowFn(func() time.Time { return now.ToTime() }))
  1961  		rops        = opts.RetentionOptions()
  1962  		blockSize   = rops.BlockSize()
  1963  		blockStart4 = now.Truncate(blockSize)
  1964  		blockStart3 = blockStart4.Add(-2 * blockSize)
  1965  		blockStart2 = blockStart4.Add(-3 * blockSize)
  1966  		blockStart1 = blockStart4.Add(-4 * blockSize)
  1967  		bds         = []blockData{
  1968  			{
  1969  				start:     blockStart1,
  1970  				writeType: ColdWrite,
  1971  				data: [][]DecodedTestValue{
  1972  					{
  1973  						{blockStart1.Add(secs(1)), 1, xtime.Second, nil},
  1974  					},
  1975  				},
  1976  			},
  1977  			{
  1978  				start:     blockStart2,
  1979  				writeType: ColdWrite,
  1980  				data:      [][]DecodedTestValue{},
  1981  			},
  1982  			{
  1983  				start:     blockStart3,
  1984  				writeType: ColdWrite,
  1985  				data: [][]DecodedTestValue{
  1986  					{
  1987  						{blockStart3.Add(secs(1)), 1, xtime.Second, nil},
  1988  					},
  1989  				},
  1990  			},
  1991  			{
  1992  				start:     blockStart4,
  1993  				writeType: WarmWrite,
  1994  				data: [][]DecodedTestValue{
  1995  					{
  1996  						{blockStart4.Add(secs(1)), 1, xtime.Second, nil},
  1997  					},
  1998  				},
  1999  			},
  2000  		}
  2001  	)
  2002  
  2003  	buffer, _ := newTestBufferWithCustomData(t, bds, opts, nil)
  2004  	ctx := context.NewBackground()
  2005  	defer ctx.Close()
  2006  
  2007  	nonEmptyBlocks := map[xtime.UnixNano]struct{}{}
  2008  	buffer.MarkNonEmptyBlocks(nonEmptyBlocks)
  2009  	assert.Len(t, nonEmptyBlocks, 3)
  2010  	assert.Contains(t, nonEmptyBlocks, blockStart1)
  2011  	assert.NotContains(t, nonEmptyBlocks, blockStart2)
  2012  	assert.Contains(t, nonEmptyBlocks, blockStart3)
  2013  	assert.Contains(t, nonEmptyBlocks, blockStart4)
  2014  }
  2015  
  2016  type writeAttempt struct {
  2017  	data          DecodedTestValue
  2018  	expectWritten bool
  2019  	expectErr     bool
  2020  }
  2021  
  2022  func TestEncoderLimit(t *testing.T) {
  2023  	type writeTimeOffset struct {
  2024  		timeOffset               int
  2025  		expectTooManyEncodersErr bool
  2026  	}
  2027  
  2028  	tests := []struct {
  2029  		desc                  string
  2030  		encodersPerBlockLimit int
  2031  		writes                []writeTimeOffset
  2032  	}{
  2033  		{
  2034  			desc:                  "one encoder, no limit",
  2035  			encodersPerBlockLimit: 0, // 0 means no limit.
  2036  			writes: []writeTimeOffset{
  2037  				// Writes are in order, so just one encoder.
  2038  				{
  2039  					timeOffset:               1,
  2040  					expectTooManyEncodersErr: false,
  2041  				},
  2042  				{
  2043  					timeOffset:               2,
  2044  					expectTooManyEncodersErr: false,
  2045  				},
  2046  				{
  2047  					timeOffset:               3,
  2048  					expectTooManyEncodersErr: false,
  2049  				},
  2050  				{
  2051  					timeOffset:               4,
  2052  					expectTooManyEncodersErr: false,
  2053  				},
  2054  			},
  2055  		},
  2056  		{
  2057  			desc:                  "many encoders, no limit",
  2058  			encodersPerBlockLimit: 0, // 0 means no limit.
  2059  			writes: []writeTimeOffset{
  2060  				// Writes are in reverse chronological order, so every write
  2061  				// requires a new encoder.
  2062  				{
  2063  					timeOffset:               9,
  2064  					expectTooManyEncodersErr: false,
  2065  				},
  2066  				{
  2067  					timeOffset:               8,
  2068  					expectTooManyEncodersErr: false,
  2069  				},
  2070  				{
  2071  					timeOffset:               7,
  2072  					expectTooManyEncodersErr: false,
  2073  				},
  2074  				{
  2075  					timeOffset:               6,
  2076  					expectTooManyEncodersErr: false,
  2077  				},
  2078  				{
  2079  					timeOffset:               5,
  2080  					expectTooManyEncodersErr: false,
  2081  				},
  2082  				{
  2083  					timeOffset:               4,
  2084  					expectTooManyEncodersErr: false,
  2085  				},
  2086  				{
  2087  					timeOffset:               3,
  2088  					expectTooManyEncodersErr: false,
  2089  				},
  2090  				{
  2091  					timeOffset:               2,
  2092  					expectTooManyEncodersErr: false,
  2093  				},
  2094  			},
  2095  		},
  2096  		{
  2097  			desc:                  "within limit",
  2098  			encodersPerBlockLimit: 3,
  2099  			writes: []writeTimeOffset{
  2100  				// First encoder created.
  2101  				{
  2102  					timeOffset:               3,
  2103  					expectTooManyEncodersErr: false,
  2104  				},
  2105  				// Second encoder created.
  2106  				{
  2107  					timeOffset:               2,
  2108  					expectTooManyEncodersErr: false,
  2109  				},
  2110  				// Third encoder created.
  2111  				{
  2112  					timeOffset:               1,
  2113  					expectTooManyEncodersErr: false,
  2114  				},
  2115  			},
  2116  		},
  2117  		{
  2118  			desc:                  "within limit, many writes",
  2119  			encodersPerBlockLimit: 2,
  2120  			writes: []writeTimeOffset{
  2121  				// First encoder created.
  2122  				{
  2123  					timeOffset:               10,
  2124  					expectTooManyEncodersErr: false,
  2125  				},
  2126  				// Goes in first encoder.
  2127  				{
  2128  					timeOffset:               11,
  2129  					expectTooManyEncodersErr: false,
  2130  				},
  2131  				// Goes in first encoder.
  2132  				{
  2133  					timeOffset:               12,
  2134  					expectTooManyEncodersErr: false,
  2135  				},
  2136  				// Second encoder created.
  2137  				{
  2138  					timeOffset:               1,
  2139  					expectTooManyEncodersErr: false,
  2140  				},
  2141  				// Goes in second encoder.
  2142  				{
  2143  					timeOffset:               2,
  2144  					expectTooManyEncodersErr: false,
  2145  				},
  2146  				// Goes in first encoder.
  2147  				{
  2148  					timeOffset:               13,
  2149  					expectTooManyEncodersErr: false,
  2150  				},
  2151  				// Goes in second encoder.
  2152  				{
  2153  					timeOffset:               3,
  2154  					expectTooManyEncodersErr: false,
  2155  				},
  2156  			},
  2157  		},
  2158  		{
  2159  			desc:                  "too many encoders",
  2160  			encodersPerBlockLimit: 3,
  2161  			writes: []writeTimeOffset{
  2162  				// First encoder created.
  2163  				{
  2164  					timeOffset:               5,
  2165  					expectTooManyEncodersErr: false,
  2166  				},
  2167  				// Second encoder created.
  2168  				{
  2169  					timeOffset:               4,
  2170  					expectTooManyEncodersErr: false,
  2171  				},
  2172  				// Third encoder created.
  2173  				{
  2174  					timeOffset:               3,
  2175  					expectTooManyEncodersErr: false,
  2176  				},
  2177  				// Requires fourth encoder, which is past the limit.
  2178  				{
  2179  					timeOffset:               2,
  2180  					expectTooManyEncodersErr: true,
  2181  				},
  2182  			},
  2183  		},
  2184  		{
  2185  			desc:                  "too many encoders, more writes",
  2186  			encodersPerBlockLimit: 2,
  2187  			writes: []writeTimeOffset{
  2188  				// First encoder created.
  2189  				{
  2190  					timeOffset:               10,
  2191  					expectTooManyEncodersErr: false,
  2192  				},
  2193  				// Second encoder created.
  2194  				{
  2195  					timeOffset:               2,
  2196  					expectTooManyEncodersErr: false,
  2197  				},
  2198  				// Goes in second encoder.
  2199  				{
  2200  					timeOffset:               3,
  2201  					expectTooManyEncodersErr: false,
  2202  				},
  2203  				// Goes in first encoder.
  2204  				{
  2205  					timeOffset:               11,
  2206  					expectTooManyEncodersErr: false,
  2207  				},
  2208  				// Requires third encoder, which is past the limit.
  2209  				{
  2210  					timeOffset:               1,
  2211  					expectTooManyEncodersErr: true,
  2212  				},
  2213  				// Goes in second encoder.
  2214  				{
  2215  					timeOffset:               4,
  2216  					expectTooManyEncodersErr: false,
  2217  				},
  2218  			},
  2219  		},
  2220  	}
  2221  
  2222  	for _, test := range tests {
  2223  		t.Run(test.desc, func(t *testing.T) {
  2224  			opts := newBufferTestOptions()
  2225  			rops := opts.RetentionOptions()
  2226  			curr := xtime.Now().Truncate(rops.BlockSize())
  2227  			opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
  2228  				return curr.ToTime()
  2229  			}))
			runtimeOptsMgr := opts.RuntimeOptionsManager()
			newRuntimeOpts := runtimeOptsMgr.Get().
				SetEncodersPerBlockLimit(test.encodersPerBlockLimit)
			require.NoError(t, runtimeOptsMgr.Update(newRuntimeOpts))
  2234  			buffer := newDatabaseBuffer().(*dbBuffer)
  2235  			buffer.Reset(databaseBufferResetOptions{Options: opts})
  2236  			ctx := context.NewBackground()
  2237  			defer ctx.Close()
  2238  
  2239  			for i, write := range test.writes {
  2240  				wasWritten, writeType, err := buffer.Write(ctx, testID,
  2241  					curr.Add(time.Duration(write.timeOffset)*time.Millisecond),
  2242  					float64(i), xtime.Millisecond, nil, WriteOptions{})
  2243  
  2244  				if write.expectTooManyEncodersErr {
  2245  					assert.Error(t, err)
  2246  					assert.True(t, xerrors.IsInvalidParams(err))
  2247  					assert.Equal(t, errTooManyEncoders, err)
  2248  				} else {
  2249  					assert.NoError(t, err)
  2250  					assert.True(t, wasWritten)
  2251  					assert.Equal(t, WarmWrite, writeType)
  2252  				}
  2253  			}
  2254  		})
  2255  	}
  2256  }
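
// The table above pins down the limit semantics: a limit of zero disables the
// check, in-order writes reuse the current encoder, and a write fails only
// when it would force allocating an encoder beyond the configured cap
// (out-of-order writes are what force new encoders). A sketch of that guard;
// exampleCanAddEncoder is illustrative, not the production write path:
func exampleCanAddEncoder(numEncoders, encodersPerBlockLimit int) bool {
	if encodersPerBlockLimit <= 0 {
		// Zero (or negative) means no limit.
		return true
	}
	return numEncoders < encodersPerBlockLimit
}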