github.com/m3db/m3@v1.5.0/src/dbnode/network/server/tchannelthrift/node/service_test.go

     1  // Copyright (c) 2016 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package node
    22  
    23  import (
    24  	"bytes"
    25  	gocontext "context"
    26  	"errors"
    27  	"fmt"
    28  	"sort"
    29  	"sync"
    30  	"testing"
    31  	"time"
    32  
    33  	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
    34  	"github.com/m3db/m3/src/dbnode/namespace"
    35  	"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift"
    36  	"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/convert"
    37  	tterrors "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/errors"
    38  	"github.com/m3db/m3/src/dbnode/runtime"
    39  	"github.com/m3db/m3/src/dbnode/storage"
    40  	"github.com/m3db/m3/src/dbnode/storage/block"
    41  	"github.com/m3db/m3/src/dbnode/storage/index"
    42  	conv "github.com/m3db/m3/src/dbnode/storage/index/convert"
    43  	"github.com/m3db/m3/src/dbnode/storage/limits"
    44  	"github.com/m3db/m3/src/dbnode/storage/limits/permits"
    45  	"github.com/m3db/m3/src/dbnode/storage/series"
    46  	"github.com/m3db/m3/src/dbnode/topology"
    47  	"github.com/m3db/m3/src/dbnode/tracepoint"
    48  	"github.com/m3db/m3/src/dbnode/ts"
    49  	"github.com/m3db/m3/src/dbnode/ts/writes"
    50  	"github.com/m3db/m3/src/dbnode/x/xio"
    51  	"github.com/m3db/m3/src/m3ninx/doc"
    52  	"github.com/m3db/m3/src/m3ninx/idx"
    53  	"github.com/m3db/m3/src/x/checked"
    54  	"github.com/m3db/m3/src/x/context"
    55  	"github.com/m3db/m3/src/x/ident"
    56  	xtest "github.com/m3db/m3/src/x/test"
    57  	xtime "github.com/m3db/m3/src/x/time"
    58  
    59  	"github.com/golang/mock/gomock"
    60  	"github.com/opentracing/opentracing-go"
    61  	"github.com/opentracing/opentracing-go/mocktracer"
    62  	"github.com/stretchr/testify/assert"
    63  	"github.com/stretchr/testify/require"
    64  	"github.com/uber/tchannel-go/thrift"
    65  )
    66  
    67  // Create opts once to avoid recreating a lot of default pools, etc
    68  var (
    69  	testIndexOptions          = index.NewOptions()
    70  	testNamespaceOptions      = namespace.NewOptions()
    71  	testStorageOpts           = storage.DefaultTestOptions()
    72  	testTChannelThriftOptions = tchannelthrift.NewOptions()
    73  )
    74  
    75  func init() {
    76  	// Set all pool sizes to 1 for tests
    77  	segmentArrayPoolSize = 1
    78  	writeBatchPooledReqPoolSize = 1
    79  }
    80  
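        // TestServiceHealth verifies that Health reports the service as up and mirrors the database's IsBootstrappedAndDurable state.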
    81  func TestServiceHealth(t *testing.T) {
    82  	ctrl := xtest.NewController(t)
    83  	defer ctrl.Finish()
    84  
    85  	mockDB := storage.NewMockDatabase(ctrl)
    86  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
    87  
    88  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
    89  
    90  	// Assert bootstrapped false
    91  	mockDB.EXPECT().IsBootstrappedAndDurable().Return(false)
    92  
    93  	tctx, _ := thrift.NewContext(time.Minute)
    94  	result, err := service.Health(tctx)
    95  	require.NoError(t, err)
    96  
    97  	assert.Equal(t, true, result.Ok)
    98  	assert.Equal(t, "up", result.Status)
    99  	assert.Equal(t, false, result.Bootstrapped)
   100  
   101  	// Assert bootstrapped true
   102  	mockDB.EXPECT().IsBootstrappedAndDurable().Return(true)
   103  
   104  	tctx, _ = thrift.NewContext(time.Minute)
   105  	result, err = service.Health(tctx)
   106  	require.NoError(t, err)
   107  
   108  	assert.Equal(t, true, result.Ok)
   109  	assert.Equal(t, "up", result.Status)
   110  	assert.Equal(t, true, result.Bootstrapped)
   111  }
   112  
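        // TestServiceBootstrapped verifies that Bootstrapped errors until the database reports it is bootstrapped and durable.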
   113  func TestServiceBootstrapped(t *testing.T) {
   114  	ctrl := xtest.NewController(t)
   115  	defer ctrl.Finish()
   116  
   117  	mockDB := storage.NewMockDatabase(ctrl)
   118  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   119  
   120  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
   121  
   122  	// Should return an error when not bootstrapped
   123  	mockDB.EXPECT().IsBootstrappedAndDurable().Return(false)
   124  	tctx, _ := thrift.NewContext(time.Minute)
   125  	_, err := service.Bootstrapped(tctx)
   126  	require.Error(t, err)
   127  
   128  	// Should not return an error when bootstrapped
   129  	mockDB.EXPECT().IsBootstrappedAndDurable().Return(true)
   130  
   131  	tctx, _ = thrift.NewContext(time.Minute)
   132  	_, err = service.Bootstrapped(tctx)
   133  	require.NoError(t, err)
   134  }
   135  
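        // TestServiceBootstrappedInPlacementOrNoPlacement exercises BootstrappedInPlacementOrNoPlacement across combinations of placement, database, and bootstrap state.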
   136  func TestServiceBootstrappedInPlacementOrNoPlacement(t *testing.T) {
   137  	type TopologyIsSetResult struct {
   138  		result bool
   139  		err    error
   140  	}
   141  
   142  	type bootstrappedAndDurableResult struct {
   143  		result bool
   144  	}
   145  
   146  	tests := []struct {
   147  		name                   string
   148  		dbSet                  bool
   149  		TopologyIsSet          *TopologyIsSetResult
   150  		bootstrappedAndDurable *bootstrappedAndDurableResult
   151  		expectErr              bool
   152  	}{
   153  		{
   154  			name:                   "bootstrapped in placement",
   155  			dbSet:                  true,
   156  			TopologyIsSet:          &TopologyIsSetResult{result: true, err: nil},
   157  			bootstrappedAndDurable: &bootstrappedAndDurableResult{result: true},
   158  		},
   159  		{
   160  			name:          "not in placement",
   161  			dbSet:         true,
   162  			TopologyIsSet: &TopologyIsSetResult{result: false, err: nil},
   163  		},
   164  		{
   165  			name:          "topology check error",
   166  			dbSet:         true,
   167  			TopologyIsSet: &TopologyIsSetResult{result: false, err: errors.New("an error")},
   168  			expectErr:     true,
   169  		},
   170  		{
   171  			name:          "db not set in placement",
   172  			dbSet:         false,
   173  			TopologyIsSet: &TopologyIsSetResult{result: true, err: nil},
   174  			expectErr:     true,
   175  		},
   176  		{
   177  			name:                   "not bootstrapped in placement",
   178  			dbSet:                  true,
   179  			TopologyIsSet:          &TopologyIsSetResult{result: true, err: nil},
   180  			bootstrappedAndDurable: &bootstrappedAndDurableResult{result: false},
   181  			expectErr:              true,
   182  		},
   183  	}
   184  
   185  	for _, test := range tests {
   186  		t.Run(test.name, func(t *testing.T) {
   187  			ctrl := xtest.NewController(t)
   188  			defer ctrl.Finish()
   189  
   190  			// Simulate placement
   191  			mockTopoInit := topology.NewMockInitializer(ctrl)
   192  			if r := test.TopologyIsSet; r != nil {
   193  				mockTopoInit.EXPECT().TopologyIsSet().Return(r.result, r.err)
   194  			}
   195  
   196  			var db storage.Database
   197  			if test.dbSet {
   198  				mockDB := storage.NewMockDatabase(ctrl)
   199  				mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   200  				// Simulate bootstrapped and durable
   201  				if r := test.bootstrappedAndDurable; r != nil {
   202  					mockDB.EXPECT().IsBootstrappedAndDurable().Return(r.result)
   203  				}
   204  				db = mockDB
   205  			}
   206  
   207  			testOpts := testTChannelThriftOptions.
   208  				SetTopologyInitializer(mockTopoInit)
   209  			service := NewService(db, testOpts).(*service)
   210  
   211  			// Call BootstrappedInPlacementOrNoPlacement
   212  			tctx, _ := thrift.NewContext(time.Minute)
   213  			_, err := service.BootstrappedInPlacementOrNoPlacement(tctx)
   214  			if test.expectErr {
   215  				require.Error(t, err)
   216  			} else {
   217  				require.NoError(t, err)
   218  			}
   219  		})
   220  	}
   221  }
   222  
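        // TestServiceQuery verifies that Query returns the matched series along with their tags and decoded datapoints.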
   223  func TestServiceQuery(t *testing.T) {
   224  	ctrl := xtest.NewController(t)
   225  	defer ctrl.Finish()
   226  
   227  	mockDB := storage.NewMockDatabase(ctrl)
   228  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   229  	mockDB.EXPECT().IsOverloaded().Return(false)
   230  
   231  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
   232  
   233  	tctx, _ := tchannelthrift.NewContext(time.Minute)
   234  	ctx := tchannelthrift.Context(tctx)
   235  	defer ctx.Close()
   236  
   237  	start := xtime.Now().Add(-2 * time.Hour)
   238  	end := start.Add(2 * time.Hour)
   239  
   240  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   241  
   242  	enc := testStorageOpts.EncoderPool().Get()
   243  	enc.Reset(start, 0, nil)
   244  
   245  	nsID := "metrics"
   246  
   247  	streams := map[string]xio.SegmentReader{}
   248  	seriesData := map[string][]struct {
   249  		t xtime.UnixNano
   250  		v float64
   251  	}{
   252  		"foo": {
   253  			{start.Add(10 * time.Second), 1.0},
   254  			{start.Add(20 * time.Second), 2.0},
   255  		},
   256  		"bar": {
   257  			{start.Add(20 * time.Second), 3.0},
   258  			{start.Add(30 * time.Second), 4.0},
   259  		},
   260  	}
   261  	tags := map[string][]struct {
   262  		name  string
   263  		value string
   264  	}{
   265  		"foo": {{"foo", "bar"}, {"baz", "dxk"}},
   266  		"bar": {{"foo", "bar"}, {"dzk", "baz"}},
   267  	}
   268  	for id, s := range seriesData {
   269  		enc := testStorageOpts.EncoderPool().Get()
   270  		enc.Reset(start, 0, nil)
   271  		for _, v := range s {
   272  			dp := ts.Datapoint{
   273  				TimestampNanos: v.t,
   274  				Value:          v.v,
   275  			}
   276  			require.NoError(t, enc.Encode(dp, xtime.Second, nil))
   277  		}
   278  
   279  		stream, _ := enc.Stream(ctx)
   280  		streams[id] = stream
   281  		mockDB.EXPECT().
   282  			ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
   283  			Return(&series.FakeBlockReaderIter{
   284  				Readers: [][]xio.BlockReader{{
   285  					xio.BlockReader{
   286  						SegmentReader: stream,
   287  					},
   288  				}},
   289  			}, nil)
   290  	}
   291  
   292  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
   293  	require.NoError(t, err)
   294  	qry := index.Query{Query: req}
   295  
   296  	md1 := doc.Metadata{
   297  		ID: ident.BytesID("foo"),
   298  		Fields: []doc.Field{
   299  			{
   300  				Name:  []byte("foo"),
   301  				Value: []byte("bar"),
   302  			},
   303  			{
   304  				Name:  []byte("baz"),
   305  				Value: []byte("dxk"),
   306  			},
   307  		},
   308  	}
   309  	md2 := doc.Metadata{
   310  		ID: ident.BytesID("bar"),
   311  		Fields: []doc.Field{
   312  			{
   313  				Name:  []byte("foo"),
   314  				Value: []byte("bar"),
   315  			},
   316  			{
   317  				Name:  []byte("dzk"),
   318  				Value: []byte("baz"),
   319  			},
   320  		},
   321  	}
   322  
   323  	resMap := index.NewQueryResults(ident.StringID(nsID),
   324  		index.QueryResultsOptions{}, testIndexOptions)
   325  	resMap.Map().Set(md1.ID, doc.NewDocumentFromMetadata(md1))
   326  	resMap.Map().Set(md2.ID, doc.NewDocumentFromMetadata(md2))
   327  
   328  	mockDB.EXPECT().QueryIDs(
   329  		ctx,
   330  		ident.NewIDMatcher(nsID),
   331  		index.NewQueryMatcher(qry),
   332  		index.QueryOptions{
   333  			StartInclusive: start,
   334  			EndExclusive:   end,
   335  			SeriesLimit:    10,
   336  		}).Return(index.QueryResult{Results: resMap, Exhaustive: true}, nil)
   337  
   338  	limit := int64(10)
   339  	r, err := service.Query(tctx, &rpc.QueryRequest{
   340  		Query: &rpc.Query{
   341  			Regexp: &rpc.RegexpQuery{
   342  				Field:  "foo",
   343  				Regexp: "b.*",
   344  			},
   345  		},
   346  		RangeStart:     start.Seconds(),
   347  		RangeEnd:       end.Seconds(),
   348  		RangeType:      rpc.TimeType_UNIX_SECONDS,
   349  		NameSpace:      nsID,
   350  		Limit:          &limit,
   351  		ResultTimeType: rpc.TimeType_UNIX_SECONDS,
   352  	})
   353  	require.NoError(t, err)
   354  
   355  	// Sort results to make the test deterministic.
   356  	sort.Slice(r.Results, func(i, j int) bool {
   357  		return r.Results[i].ID < r.Results[j].ID
   358  	})
   359  
   360  	ids := []string{"bar", "foo"}
   361  	require.Equal(t, len(ids), len(r.Results))
   362  	for i, id := range ids {
   363  		elem := r.Results[i]
   364  		require.NotNil(t, elem)
   365  
   366  		require.Equal(t, id, elem.ID)
   367  		require.Equal(t, len(tags[id]), len(elem.Tags))
   368  		for i, tag := range elem.Tags {
   369  			assert.Equal(t, tags[id][i].name, tag.Name)
   370  			assert.Equal(t, tags[id][i].value, tag.Value)
   371  		}
   372  
   373  		require.Equal(t, len(seriesData[id]), len(elem.Datapoints))
   374  		for i, dp := range elem.Datapoints {
   375  			assert.Equal(t, seriesData[id][i].t.Seconds(), dp.Timestamp)
   376  			assert.Equal(t, seriesData[id][i].v, dp.Value)
   377  		}
   378  	}
   379  
   380  	require.Equal(t, "Query",
   381  		ctx.GoContext().Value(tchannelthrift.EndpointContextKey).(tchannelthrift.Endpoint).String())
   382  }
   383  
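        // TestServiceSetMetadata verifies that SetMetadata and Metadata are safe for concurrent use.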
   384  func TestServiceSetMetadata(t *testing.T) {
   385  	ctrl := xtest.NewController(t)
   386  	defer ctrl.Finish()
   387  
   388  	size := 100
   389  	mockDB := storage.NewMockDatabase(ctrl)
   390  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
   391  	metas := make([]string, 0, size)
   392  	for i := 0; i < size; i++ {
   393  		metas = append(metas, fmt.Sprint(i))
   394  	}
   395  
   396  	var wg sync.WaitGroup
   397  	for _, md := range metas {
   398  		wg.Add(1)
   399  		md := md
   400  		go func() {
   401  			service.SetMetadata(md, md)
   402  			wg.Done()
   403  		}()
   404  	}
   405  
   406  	wg.Wait()
   407  	for _, md := range metas {
   408  		wg.Add(1)
   409  		md := md
   410  		go func() {
   411  			meta, ok := service.Metadata(md)
   412  			assert.True(t, ok)
   413  			assert.Equal(t, md, meta)
   414  			wg.Done()
   415  		}()
   416  	}
   417  
   418  	wg.Wait()
   419  }
   420  
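        // TestServiceQueryOverloaded verifies that Query returns errServerIsOverloaded when the database reports it is overloaded.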
   421  func TestServiceQueryOverloaded(t *testing.T) {
   422  	ctrl := xtest.NewController(t)
   423  	defer ctrl.Finish()
   424  
   425  	mockDB := storage.NewMockDatabase(ctrl)
   426  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   427  	mockDB.EXPECT().IsOverloaded().Return(true)
   428  
   429  	var (
   430  		service = NewService(mockDB, testTChannelThriftOptions).(*service)
   431  		tctx, _ = tchannelthrift.NewContext(time.Minute)
   432  		ctx     = tchannelthrift.Context(tctx)
   433  		start   = xtime.Now().Add(-2 * time.Hour)
   434  		end     = start.Add(2 * time.Hour)
   435  		enc     = testStorageOpts.EncoderPool().Get()
   436  		nsID    = "metrics"
   437  		limit   = int64(100)
   438  	)
   439  
   440  	defer ctx.Close()
   441  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   442  	enc.Reset(start, 0, nil)
   443  
   444  	_, err := service.Query(tctx, &rpc.QueryRequest{
   445  		Query: &rpc.Query{
   446  			Regexp: &rpc.RegexpQuery{
   447  				Field:  "foo",
   448  				Regexp: "b.*",
   449  			},
   450  		},
   451  		RangeStart:     start.Seconds(),
   452  		RangeEnd:       end.Seconds(),
   453  		RangeType:      rpc.TimeType_UNIX_SECONDS,
   454  		NameSpace:      nsID,
   455  		Limit:          &limit,
   456  		ResultTimeType: rpc.TimeType_UNIX_SECONDS,
   457  	})
   458  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
   459  }
   460  
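        // TestServiceQueryDatabaseNotSet verifies that Query returns errDatabaseIsNotInitializedYet when no database has been set.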
   461  func TestServiceQueryDatabaseNotSet(t *testing.T) {
   462  	ctrl := xtest.NewController(t)
   463  	defer ctrl.Finish()
   464  
   465  	var (
   466  		service = NewService(nil, testTChannelThriftOptions).(*service)
   467  		tctx, _ = tchannelthrift.NewContext(time.Minute)
   468  		ctx     = tchannelthrift.Context(tctx)
   469  		start   = xtime.Now().Add(-2 * time.Hour)
   470  		end     = start.Add(2 * time.Hour)
   471  		enc     = testStorageOpts.EncoderPool().Get()
   472  		nsID    = "metrics"
   473  		limit   = int64(100)
   474  	)
   475  
   476  	defer ctx.Close()
   477  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   478  	enc.Reset(start, 0, nil)
   479  
   480  	_, err := service.Query(tctx, &rpc.QueryRequest{
   481  		Query: &rpc.Query{
   482  			Regexp: &rpc.RegexpQuery{
   483  				Field:  "foo",
   484  				Regexp: "b.*",
   485  			},
   486  		},
   487  		RangeStart:     start.Seconds(),
   488  		RangeEnd:       end.Seconds(),
   489  		RangeType:      rpc.TimeType_UNIX_SECONDS,
   490  		NameSpace:      nsID,
   491  		Limit:          &limit,
   492  		ResultTimeType: rpc.TimeType_UNIX_SECONDS,
   493  	})
   494  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
   495  }
   496  
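        // TestServiceQueryUnknownErr verifies that Query converts unexpected database errors into RPC errors.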
   497  func TestServiceQueryUnknownErr(t *testing.T) {
   498  	ctrl := xtest.NewController(t)
   499  	defer ctrl.Finish()
   500  
   501  	mockDB := storage.NewMockDatabase(ctrl)
   502  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   503  	mockDB.EXPECT().IsOverloaded().Return(false)
   504  
   505  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
   506  
   507  	tctx, _ := tchannelthrift.NewContext(time.Minute)
   508  	ctx := tchannelthrift.Context(tctx)
   509  	defer ctx.Close()
   510  
   511  	start := xtime.Now().Add(-2 * time.Hour)
   512  	end := start.Add(2 * time.Hour)
   513  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   514  	enc := testStorageOpts.EncoderPool().Get()
   515  	enc.Reset(start, 0, nil)
   516  
   517  	nsID := "metrics"
   518  	unknownErr := fmt.Errorf("unknown-error")
   519  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
   520  	require.NoError(t, err)
   521  	qry := index.Query{Query: req}
   522  
   523  	mockDB.EXPECT().QueryIDs(
   524  		ctx,
   525  		ident.NewIDMatcher(nsID),
   526  		index.NewQueryMatcher(qry),
   527  		index.QueryOptions{
   528  			StartInclusive: start,
   529  			EndExclusive:   end,
   530  			SeriesLimit:    10,
   531  		}).Return(index.QueryResult{}, unknownErr)
   532  
   533  	limit := int64(10)
   534  	_, err = service.Query(tctx, &rpc.QueryRequest{
   535  		Query: &rpc.Query{
   536  			Regexp: &rpc.RegexpQuery{
   537  				Field:  "foo",
   538  				Regexp: "b.*",
   539  			},
   540  		},
   541  		RangeStart:     start.Seconds(),
   542  		RangeEnd:       end.Seconds(),
   543  		RangeType:      rpc.TimeType_UNIX_SECONDS,
   544  		NameSpace:      nsID,
   545  		Limit:          &limit,
   546  		ResultTimeType: rpc.TimeType_UNIX_SECONDS,
   547  	})
   548  	require.Error(t, err)
   549  	require.Equal(t, convert.ToRPCError(unknownErr), err)
   550  }
   551  
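        // TestServiceFetch verifies that Fetch decodes a single series' encoded blocks into datapoints.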
   552  func TestServiceFetch(t *testing.T) {
   553  	ctrl := xtest.NewController(t)
   554  	defer ctrl.Finish()
   555  
   556  	mockDB := storage.NewMockDatabase(ctrl)
   557  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   558  	mockDB.EXPECT().IsOverloaded().Return(false)
   559  
   560  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
   561  
   562  	tctx, _ := tchannelthrift.NewContext(time.Minute)
   563  	ctx := tchannelthrift.Context(tctx)
   564  	defer ctx.Close()
   565  
   566  	start := xtime.Now().Add(-2 * time.Hour)
   567  	end := start.Add(2 * time.Hour)
   568  
   569  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   570  
   571  	enc := testStorageOpts.EncoderPool().Get()
   572  	enc.Reset(start, 0, nil)
   573  
   574  	nsID := "metrics"
   575  
   576  	values := []struct {
   577  		t xtime.UnixNano
   578  		v float64
   579  	}{
   580  		{start.Add(10 * time.Second), 1.0},
   581  		{start.Add(20 * time.Second), 2.0},
   582  	}
   583  	for _, v := range values {
   584  		dp := ts.Datapoint{
   585  			TimestampNanos: v.t,
   586  			Value:          v.v,
   587  		}
   588  		require.NoError(t, enc.Encode(dp, xtime.Second, nil))
   589  	}
   590  
   591  	stream, _ := enc.Stream(ctx)
   592  	mockDB.EXPECT().
   593  		ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher("foo"), start, end).
   594  		Return(&series.FakeBlockReaderIter{
   595  			Readers: [][]xio.BlockReader{
   596  				{
   597  					{
   598  						SegmentReader: stream,
   599  					},
   600  				},
   601  			},
   602  		}, nil)
   603  
   604  	r, err := service.Fetch(tctx, &rpc.FetchRequest{
   605  		RangeStart:     start.Seconds(),
   606  		RangeEnd:       end.Seconds(),
   607  		RangeType:      rpc.TimeType_UNIX_SECONDS,
   608  		NameSpace:      nsID,
   609  		ID:             "foo",
   610  		ResultTimeType: rpc.TimeType_UNIX_SECONDS,
   611  	})
   612  	require.NoError(t, err)
   613  
   614  	require.Equal(t, len(values), len(r.Datapoints))
   615  	for i, v := range values {
   616  		assert.Equal(t, v.t.Seconds(), r.Datapoints[i].Timestamp)
   617  		assert.Equal(t, v.v, r.Datapoints[i].Value)
   618  	}
   619  
   620  	require.Equal(t, "Fetch",
   621  		ctx.GoContext().Value(tchannelthrift.EndpointContextKey).(tchannelthrift.Endpoint).String())
   622  }
   623  
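        // TestServiceFetchIsOverloaded verifies that Fetch returns errServerIsOverloaded when the database reports it is overloaded.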
   624  func TestServiceFetchIsOverloaded(t *testing.T) {
   625  	ctrl := xtest.NewController(t)
   626  	defer ctrl.Finish()
   627  
   628  	mockDB := storage.NewMockDatabase(ctrl)
   629  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   630  	mockDB.EXPECT().IsOverloaded().Return(true)
   631  
   632  	var (
   633  		service = NewService(mockDB, testTChannelThriftOptions).(*service)
   634  		tctx, _ = tchannelthrift.NewContext(time.Minute)
   635  		ctx     = tchannelthrift.Context(tctx)
   636  		start   = xtime.Now().Add(-2 * time.Hour)
   637  		end     = start.Add(2 * time.Hour)
   638  		enc     = testStorageOpts.EncoderPool().Get()
   639  		nsID    = "metrics"
   640  	)
   641  
   642  	defer ctx.Close()
   643  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   644  	enc.Reset(start, 0, nil)
   645  
   646  	_, err := service.Fetch(tctx, &rpc.FetchRequest{
   647  		RangeStart:     start.Seconds(),
   648  		RangeEnd:       end.Seconds(),
   649  		RangeType:      rpc.TimeType_UNIX_SECONDS,
   650  		NameSpace:      nsID,
   651  		ID:             "foo",
   652  		ResultTimeType: rpc.TimeType_UNIX_SECONDS,
   653  	})
   654  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
   655  }
   656  
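        // TestServiceFetchDatabaseNotSet verifies that Fetch returns errDatabaseIsNotInitializedYet when no database has been set.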
   657  func TestServiceFetchDatabaseNotSet(t *testing.T) {
   658  	ctrl := xtest.NewController(t)
   659  	defer ctrl.Finish()
   660  
   661  	var (
   662  		service = NewService(nil, testTChannelThriftOptions).(*service)
   663  		tctx, _ = tchannelthrift.NewContext(time.Minute)
   664  		ctx     = tchannelthrift.Context(tctx)
   665  		start   = xtime.Now().Add(-2 * time.Hour)
   666  		end     = start.Add(2 * time.Hour)
   667  		enc     = testStorageOpts.EncoderPool().Get()
   668  		nsID    = "metrics"
   669  	)
   670  
   671  	defer ctx.Close()
   672  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   673  	enc.Reset(start, 0, nil)
   674  
   675  	_, err := service.Fetch(tctx, &rpc.FetchRequest{
   676  		RangeStart:     start.Seconds(),
   677  		RangeEnd:       end.Seconds(),
   678  		RangeType:      rpc.TimeType_UNIX_SECONDS,
   679  		NameSpace:      nsID,
   680  		ID:             "foo",
   681  		ResultTimeType: rpc.TimeType_UNIX_SECONDS,
   682  	})
   683  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
   684  }
   685  
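        // TestServiceFetchUnknownErr verifies that Fetch converts unexpected database errors into RPC errors.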
   686  func TestServiceFetchUnknownErr(t *testing.T) {
   687  	ctrl := xtest.NewController(t)
   688  	defer ctrl.Finish()
   689  	mockDB := storage.NewMockDatabase(ctrl)
   690  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   691  	mockDB.EXPECT().IsOverloaded().Return(false)
   692  
   693  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
   694  	tctx, _ := tchannelthrift.NewContext(time.Minute)
   695  	ctx := tchannelthrift.Context(tctx)
   696  	defer ctx.Close()
   697  
   698  	start := xtime.Now().Add(-2 * time.Hour)
   699  	end := start.Add(2 * time.Hour)
   700  
   701  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   702  	nsID := "metrics"
   703  	unknownErr := fmt.Errorf("unknown-err")
   704  
   705  	mockDB.EXPECT().
   706  		ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher("foo"), start, end).
   707  		Return(nil, unknownErr)
   708  
   709  	_, err := service.Fetch(tctx, &rpc.FetchRequest{
   710  		RangeStart:     start.Seconds(),
   711  		RangeEnd:       end.Seconds(),
   712  		RangeType:      rpc.TimeType_UNIX_SECONDS,
   713  		NameSpace:      nsID,
   714  		ID:             "foo",
   715  		ResultTimeType: rpc.TimeType_UNIX_SECONDS,
   716  	})
   717  	require.Error(t, err)
   718  	require.Equal(t, convert.ToRPCError(unknownErr), err)
   719  }
   720  
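        // TestServiceFetchBatchRaw verifies that FetchBatchRaw returns merged segments for each requested ID.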
   721  func TestServiceFetchBatchRaw(t *testing.T) {
   722  	ctrl := xtest.NewController(t)
   723  	defer ctrl.Finish()
   724  
   725  	mockDB := storage.NewMockDatabase(ctrl)
   726  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   727  	mockDB.EXPECT().IsOverloaded().Return(false)
   728  
   729  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
   730  
   731  	tctx, _ := tchannelthrift.NewContext(time.Minute)
   732  	ctx := tchannelthrift.Context(tctx)
   733  	defer ctx.Close()
   734  
   735  	start := xtime.Now().Add(-2 * time.Hour)
   736  	end := start.Add(2 * time.Hour)
   737  
   738  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   739  
   740  	nsID := "metrics"
   741  
   742  	streams := map[string]xio.SegmentReader{}
   743  	seriesData := map[string][]struct {
   744  		t xtime.UnixNano
   745  		v float64
   746  	}{
   747  		"foo": {
   748  			{start.Add(10 * time.Second), 1.0},
   749  			{start.Add(20 * time.Second), 2.0},
   750  		},
   751  		"bar": {
   752  			{start.Add(20 * time.Second), 3.0},
   753  			{start.Add(30 * time.Second), 4.0},
   754  		},
   755  	}
   756  	for id, s := range seriesData {
   757  		enc := testStorageOpts.EncoderPool().Get()
   758  		enc.Reset(start, 0, nil)
   759  		for _, v := range s {
   760  			dp := ts.Datapoint{
   761  				TimestampNanos: v.t,
   762  				Value:          v.v,
   763  			}
   764  			require.NoError(t, enc.Encode(dp, xtime.Second, nil))
   765  		}
   766  
   767  		stream, _ := enc.Stream(ctx)
   768  		streams[id] = stream
   769  		mockDB.EXPECT().
   770  			ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
   771  			Return(&series.FakeBlockReaderIter{
   772  				Readers: [][]xio.BlockReader{
   773  					{
   774  						{
   775  							SegmentReader: stream,
   776  						},
   777  					},
   778  				},
   779  			}, nil)
   780  	}
   781  
   782  	ids := [][]byte{[]byte("foo"), []byte("bar")}
   783  	r, err := service.FetchBatchRaw(tctx, &rpc.FetchBatchRawRequest{
   784  		RangeStart:    start.Seconds(),
   785  		RangeEnd:      end.Seconds(),
   786  		RangeTimeType: rpc.TimeType_UNIX_SECONDS,
   787  		NameSpace:     []byte(nsID),
   788  		Ids:           ids,
   789  	})
   790  	require.NoError(t, err)
   791  
   792  	require.Equal(t, len(ids), len(r.Elements))
   793  	for i, id := range ids {
   794  		elem := r.Elements[i]
   795  		require.NotNil(t, elem)
   796  
   797  		assert.Nil(t, elem.Err)
   798  		require.Equal(t, 1, len(elem.Segments))
   799  
   800  		seg := elem.Segments[0]
   801  		require.NotNil(t, seg)
   802  		require.NotNil(t, seg.Merged)
   803  
   804  		var expectHead, expectTail []byte
   805  		expectSegment, err := streams[string(id)].Segment()
   806  		require.NoError(t, err)
   807  
   808  		if expectSegment.Head != nil {
   809  			expectHead = expectSegment.Head.Bytes()
   810  		}
   811  		if expectSegment.Tail != nil {
   812  			expectTail = expectSegment.Tail.Bytes()
   813  		}
   814  
   815  		assert.Equal(t, expectHead, seg.Merged.Head)
   816  		assert.Equal(t, expectTail, seg.Merged.Tail)
   817  	}
   818  
   819  	require.Equal(t, "FetchBatchRaw",
   820  		ctx.GoContext().Value(tchannelthrift.EndpointContextKey).(tchannelthrift.Endpoint).String())
   821  }
   822  
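        // TestServiceFetchBatchRawV2MultiNS verifies that FetchBatchRawV2 resolves each request element against its own namespace.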
   823  func TestServiceFetchBatchRawV2MultiNS(t *testing.T) {
   824  	ctrl := xtest.NewController(t)
   825  	defer ctrl.Finish()
   826  
   827  	mockDB := storage.NewMockDatabase(ctrl)
   828  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   829  	mockDB.EXPECT().IsOverloaded().Return(false)
   830  
   831  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
   832  
   833  	tctx, _ := tchannelthrift.NewContext(time.Minute)
   834  	ctx := tchannelthrift.Context(tctx)
   835  	defer ctx.Close()
   836  
   837  	start := xtime.Now().Add(-2 * time.Hour)
   838  	end := start.Add(2 * time.Hour)
   839  
   840  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   841  
   842  	nsID1 := "metrics1"
   843  	nsID2 := "metrics2"
   844  
   845  	streams := map[string]xio.SegmentReader{}
   846  	seriesData := map[string][]struct {
   847  		t xtime.UnixNano
   848  		v float64
   849  	}{
   850  		"foo": {
   851  			{start.Add(10 * time.Second), 1.0},
   852  			{start.Add(20 * time.Second), 2.0},
   853  		},
   854  		"bar": {
   855  			{start.Add(20 * time.Second), 3.0},
   856  			{start.Add(30 * time.Second), 4.0},
   857  		},
   858  	}
   859  	for id, s := range seriesData {
   860  		enc := testStorageOpts.EncoderPool().Get()
   861  		enc.Reset(start, 0, nil)
   862  		for _, v := range s {
   863  			dp := ts.Datapoint{
   864  				TimestampNanos: v.t,
   865  				Value:          v.v,
   866  			}
   867  			require.NoError(t, enc.Encode(dp, xtime.Second, nil))
   868  		}
   869  
   870  		stream, _ := enc.Stream(ctx)
   871  		streams[id] = stream
   872  		nsID := nsID1
   873  		if id == "bar" {
   874  			nsID = nsID2
   875  		}
   876  		mockDB.EXPECT().
   877  			ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
   878  			Return(&series.FakeBlockReaderIter{
   879  				Readers: [][]xio.BlockReader{
   880  					{
   881  						{
   882  							SegmentReader: stream,
   883  						},
   884  					},
   885  				},
   886  			}, nil)
   887  	}
   888  
   889  	ids := [][]byte{[]byte("foo"), []byte("bar")}
   890  	elements := []*rpc.FetchBatchRawV2RequestElement{
   891  		{
   892  			NameSpace:     0,
   893  			RangeStart:    start.Seconds(),
   894  			RangeEnd:      end.Seconds(),
   895  			ID:            []byte("foo"),
   896  			RangeTimeType: rpc.TimeType_UNIX_SECONDS,
   897  		},
   898  		{
   899  			NameSpace:     1,
   900  			RangeStart:    start.Seconds(),
   901  			RangeEnd:      end.Seconds(),
   902  			ID:            []byte("bar"),
   903  			RangeTimeType: rpc.TimeType_UNIX_SECONDS,
   904  		},
   905  	}
   906  	r, err := service.FetchBatchRawV2(tctx, &rpc.FetchBatchRawV2Request{
   907  		NameSpaces: [][]byte{[]byte(nsID1), []byte(nsID2)},
   908  		Elements:   elements,
   909  	})
   910  	require.NoError(t, err)
   911  
   912  	require.Equal(t, len(ids), len(r.Elements))
   913  	for i, id := range ids {
   914  		elem := r.Elements[i]
   915  		require.NotNil(t, elem)
   916  
   917  		assert.Nil(t, elem.Err)
   918  		require.Equal(t, 1, len(elem.Segments))
   919  
   920  		seg := elem.Segments[0]
   921  		require.NotNil(t, seg)
   922  		require.NotNil(t, seg.Merged)
   923  
   924  		var expectHead, expectTail []byte
   925  		expectSegment, err := streams[string(id)].Segment()
   926  		require.NoError(t, err)
   927  
   928  		if expectSegment.Head != nil {
   929  			expectHead = expectSegment.Head.Bytes()
   930  		}
   931  		if expectSegment.Tail != nil {
   932  			expectTail = expectSegment.Tail.Bytes()
   933  		}
   934  
   935  		assert.Equal(t, expectHead, seg.Merged.Head)
   936  		assert.Equal(t, expectTail, seg.Merged.Tail)
   937  	}
   938  
   939  	require.Equal(t, "FetchBatchRawV2",
   940  		ctx.GoContext().Value(tchannelthrift.EndpointContextKey).(tchannelthrift.Endpoint).String())
   941  }
   942  
   943  // TestServiceFetchBatchRawOverMaxOutstandingRequests tests that the FetchBatchRaw endpoint
   944  // will reject requests if the number of outstanding read requests has hit the maximum.
   945  func TestServiceFetchBatchRawOverMaxOutstandingRequests(t *testing.T) {
   946  	ctrl := xtest.NewController(t)
   947  	defer ctrl.Finish()
   948  
   949  	mockDB := storage.NewMockDatabase(ctrl)
   950  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
   951  	mockDB.EXPECT().IsOverloaded().Return(false)
   952  
   953  	tchanOpts := testTChannelThriftOptions.
   954  		SetMaxOutstandingReadRequests(1)
   955  	service := NewService(mockDB, tchanOpts).(*service)
   956  
   957  	tctx, _ := tchannelthrift.NewContext(time.Minute)
   958  	ctx := tchannelthrift.Context(tctx)
   959  	defer ctx.Close()
   960  
   961  	start := xtime.Now().Add(-2 * time.Hour)
   962  	end := start.Add(2 * time.Hour)
   963  
   964  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
   965  
   966  	var (
   967  		nsID       = "metrics"
   968  		streams    = map[string]xio.SegmentReader{}
   969  		seriesData = map[string][]struct {
   970  			t xtime.UnixNano
   971  			v float64
   972  		}{
   973  			"foo": {
   974  				{start.Add(10 * time.Second), 1.0},
   975  				{start.Add(20 * time.Second), 2.0},
   976  			},
   977  		}
   978  
   979  		requestIsOutstanding = make(chan struct{})
   980  		testIsComplete       = make(chan struct{})
   981  	)
   982  	for id, s := range seriesData {
   983  		enc := testStorageOpts.EncoderPool().Get()
   984  		enc.Reset(start, 0, nil)
   985  		for _, v := range s {
   986  			dp := ts.Datapoint{
   987  				TimestampNanos: v.t,
   988  				Value:          v.v,
   989  			}
   990  			require.NoError(t, enc.Encode(dp, xtime.Second, nil))
   991  		}
   992  
   993  		stream, _ := enc.Stream(ctx)
   994  		streams[id] = stream
   995  		mockDB.EXPECT().
   996  			ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
   997  			Do(func(ctx interface{}, nsID ident.ID, seriesID ident.ID, start xtime.UnixNano, end xtime.UnixNano) {
   998  				close(requestIsOutstanding)
   999  				<-testIsComplete
  1000  			}).
  1001  			Return(&series.FakeBlockReaderIter{
  1002  				Readers: [][]xio.BlockReader{
  1003  					{
  1004  						{
  1005  							SegmentReader: stream,
  1006  						},
  1007  					},
  1008  				},
  1009  			}, nil)
  1010  	}
  1011  
  1012  	var (
  1013  		ids                          = [][]byte{[]byte("foo")}
  1014  		outstandingRequestIsComplete = make(chan struct{})
  1015  	)
  1016  	// First request will hang until the test is over, simulating an "outstanding" request.
  1017  	go func() {
  1018  		service.FetchBatchRaw(tctx, &rpc.FetchBatchRawRequest{
  1019  			RangeStart:    start.Seconds(),
  1020  			RangeEnd:      end.Seconds(),
  1021  			RangeTimeType: rpc.TimeType_UNIX_SECONDS,
  1022  			NameSpace:     []byte(nsID),
  1023  			Ids:           ids,
  1024  		})
  1025  		close(outstandingRequestIsComplete)
  1026  	}()
  1027  
  1028  	<-requestIsOutstanding
  1029  	_, err := service.FetchBatchRaw(tctx, &rpc.FetchBatchRawRequest{
  1030  		RangeStart:    start.Seconds(),
  1031  		RangeEnd:      end.Seconds(),
  1032  		RangeTimeType: rpc.TimeType_UNIX_SECONDS,
  1033  		NameSpace:     []byte(nsID),
  1034  		Ids:           ids,
  1035  	})
  1036  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  1037  	close(testIsComplete)
  1038  
  1039  	// Ensure the number of outstanding requests gets decremented at the end of the RPC.
  1040  	<-outstandingRequestIsComplete
  1041  	require.Equal(t, 0, service.state.numOutstandingReadRPCs)
  1042  }
  1043  
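        // TestServiceFetchBatchRawUnknownError verifies that per-ID read errors are returned as element-level RPC errors rather than failing the whole batch.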
  1044  func TestServiceFetchBatchRawUnknownError(t *testing.T) {
  1045  	ctrl := xtest.NewController(t)
  1046  	defer ctrl.Finish()
  1047  
  1048  	mockDB := storage.NewMockDatabase(ctrl)
  1049  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1050  	mockDB.EXPECT().IsOverloaded().Return(false)
  1051  
  1052  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  1053  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  1054  	ctx := tchannelthrift.Context(tctx)
  1055  	defer ctx.Close()
  1056  
  1057  	start := xtime.Now().Add(-2 * time.Hour)
  1058  	end := start.Add(2 * time.Hour)
  1059  
  1060  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1061  
  1062  	nsID := "metrics"
  1063  	unknownErr := fmt.Errorf("unknown-err")
  1064  
  1065  	series := map[string][]struct {
  1066  		t xtime.UnixNano
  1067  		v float64
  1068  	}{
  1069  		"foo": {
  1070  			{start.Add(10 * time.Second), 1.0},
  1071  			{start.Add(20 * time.Second), 2.0},
  1072  		},
  1073  	}
  1074  	for id := range series {
  1075  		mockDB.EXPECT().
  1076  			ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
  1077  			Return(nil, unknownErr)
  1078  	}
  1079  
  1080  	ids := [][]byte{[]byte("foo")}
  1081  	r, err := service.FetchBatchRaw(tctx, &rpc.FetchBatchRawRequest{
  1082  		RangeStart:    start.Seconds(),
  1083  		RangeEnd:      end.Seconds(),
  1084  		RangeTimeType: rpc.TimeType_UNIX_SECONDS,
  1085  		NameSpace:     []byte(nsID),
  1086  		Ids:           ids,
  1087  	})
  1088  	require.NoError(t, err)
  1089  	require.Len(t, r.Elements, 1)
  1090  	require.Equal(t, convert.ToRPCError(unknownErr), r.Elements[0].Err)
  1091  }
  1092  
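        // TestServiceFetchBatchRawIsOverloaded verifies that FetchBatchRaw returns errServerIsOverloaded when the database reports it is overloaded.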
  1093  func TestServiceFetchBatchRawIsOverloaded(t *testing.T) {
  1094  	ctrl := xtest.NewController(t)
  1095  	defer ctrl.Finish()
  1096  
  1097  	mockDB := storage.NewMockDatabase(ctrl)
  1098  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1099  	mockDB.EXPECT().IsOverloaded().Return(true)
  1100  
  1101  	var (
  1102  		service = NewService(mockDB, testTChannelThriftOptions).(*service)
  1103  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  1104  		ctx     = tchannelthrift.Context(tctx)
  1105  		start   = xtime.Now().Add(-2 * time.Hour)
  1106  		end     = start.Add(2 * time.Hour)
  1107  		enc     = testStorageOpts.EncoderPool().Get()
  1108  		nsID    = "metrics"
  1109  		ids     = [][]byte{[]byte("foo"), []byte("bar")}
  1110  	)
  1111  
  1112  	defer ctx.Close()
  1113  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1114  	enc.Reset(start, 0, nil)
  1115  
  1116  	_, err := service.FetchBatchRaw(tctx, &rpc.FetchBatchRawRequest{
  1117  		RangeStart:    start.Seconds(),
  1118  		RangeEnd:      end.Seconds(),
  1119  		RangeTimeType: rpc.TimeType_UNIX_SECONDS,
  1120  		NameSpace:     []byte(nsID),
  1121  		Ids:           ids,
  1122  	})
  1123  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  1124  }
  1125  
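        // TestServiceFetchBatchRawDatabaseNotSet verifies that FetchBatchRaw returns errDatabaseIsNotInitializedYet when no database has been set.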
  1126  func TestServiceFetchBatchRawDatabaseNotSet(t *testing.T) {
  1127  	ctrl := xtest.NewController(t)
  1128  	defer ctrl.Finish()
  1129  
  1130  	var (
  1131  		service = NewService(nil, testTChannelThriftOptions).(*service)
  1132  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  1133  		ctx     = tchannelthrift.Context(tctx)
  1134  		start   = xtime.Now().Add(-2 * time.Hour)
  1135  		end     = start.Add(2 * time.Hour)
  1136  		nsID    = "metrics"
  1137  		ids     = [][]byte{[]byte("foo"), []byte("bar")}
  1138  	)
  1139  
  1140  	defer ctx.Close()
  1141  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1142  
  1143  	_, err := service.FetchBatchRaw(tctx, &rpc.FetchBatchRawRequest{
  1144  		RangeStart:    start.Seconds(),
  1145  		RangeEnd:      end.Seconds(),
  1146  		RangeTimeType: rpc.TimeType_UNIX_SECONDS,
  1147  		NameSpace:     []byte(nsID),
  1148  		Ids:           ids,
  1149  	})
  1150  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
  1151  }
  1152  
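        // TestServiceFetchBlocksRaw verifies that FetchBlocksRaw returns block segments and checksums for each requested ID.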
  1153  func TestServiceFetchBlocksRaw(t *testing.T) {
  1154  	ctrl := xtest.NewController(t)
  1155  	defer ctrl.Finish()
  1156  
  1157  	nsID := "metrics"
  1158  	mockNs := storage.NewMockNamespace(ctrl)
  1159  	mockNs.EXPECT().Options().Return(testNamespaceOptions).AnyTimes()
  1160  	mockDB := storage.NewMockDatabase(ctrl)
  1161  	mockDB.EXPECT().Namespace(ident.NewIDMatcher(nsID)).Return(mockNs, true).AnyTimes()
  1162  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1163  	mockDB.EXPECT().IsOverloaded().Return(false)
  1164  
  1165  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  1166  
  1167  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  1168  	ctx := tchannelthrift.Context(tctx)
  1169  	defer ctx.Close()
  1170  
  1171  	start := xtime.Now().Add(-2 * time.Hour).Truncate(time.Second)
  1172  	starts := []xtime.UnixNano{start}
  1173  
  1174  	streams := map[string]xio.SegmentReader{}
  1175  	checksums := map[string]uint32{}
  1176  	series := map[string][]struct {
  1177  		t xtime.UnixNano
  1178  		v float64
  1179  	}{
  1180  		"foo": {
  1181  			{start.Add(10 * time.Second), 1.0},
  1182  			{start.Add(20 * time.Second), 2.0},
  1183  		},
  1184  		"bar": {
  1185  			{start.Add(20 * time.Second), 3.0},
  1186  			{start.Add(30 * time.Second), 4.0},
  1187  		},
  1188  	}
  1189  	for id, s := range series {
  1190  		enc := testStorageOpts.EncoderPool().Get()
  1191  		enc.Reset(start, 0, nil)
  1192  		for _, v := range s {
  1193  			dp := ts.Datapoint{
  1194  				TimestampNanos: v.t,
  1195  				Value:          v.v,
  1196  			}
  1197  			require.NoError(t, enc.Encode(dp, xtime.Second, nil))
  1198  		}
  1199  
  1200  		stream, _ := enc.Stream(ctx)
  1201  		streams[id] = stream
  1202  
  1203  		seg, err := streams[id].Segment()
  1204  		require.NoError(t, err)
  1205  
  1206  		checksums[id] = seg.CalculateChecksum()
  1207  		expectedBlockReader := []xio.BlockReader{
  1208  			{
  1209  				SegmentReader: stream,
  1210  				Start:         start,
  1211  			},
  1212  		}
  1213  
  1214  		mockDB.EXPECT().
  1215  			FetchBlocks(ctx, ident.NewIDMatcher(nsID), uint32(0), ident.NewIDMatcher(id), starts).
  1216  			Return([]block.FetchBlockResult{
  1217  				block.NewFetchBlockResult(start, expectedBlockReader, nil),
  1218  			}, nil)
  1219  	}
  1220  
  1221  	ids := [][]byte{[]byte("foo"), []byte("bar")}
  1222  	r, err := service.FetchBlocksRaw(tctx, &rpc.FetchBlocksRawRequest{
  1223  		NameSpace: []byte(nsID),
  1224  		Shard:     0,
  1225  		Elements: []*rpc.FetchBlocksRawRequestElement{
  1226  			{
  1227  				ID:     ids[0],
  1228  				Starts: []int64{int64(start)},
  1229  			},
  1230  			{
  1231  				ID:     ids[1],
  1232  				Starts: []int64{int64(start)},
  1233  			},
  1234  		},
  1235  	})
  1236  	require.NoError(t, err)
  1237  
  1238  	require.Equal(t, len(ids), len(r.Elements))
  1239  	for i, id := range ids {
  1240  		elem := r.Elements[i]
  1241  		require.NotNil(t, elem)
  1242  		assert.Equal(t, id, elem.ID)
  1243  
  1244  		require.Equal(t, 1, len(elem.Blocks))
  1245  		require.Nil(t, elem.Blocks[0].Err)
  1246  		require.Equal(t, checksums[string(id)], uint32(*(elem.Blocks[0].Checksum)))
  1247  
  1248  		seg := elem.Blocks[0].Segments
  1249  		require.NotNil(t, seg)
  1250  		require.NotNil(t, seg.Merged)
  1251  
  1252  		var expectHead, expectTail []byte
  1253  		expectSegment, err := streams[string(id)].Segment()
  1254  		require.NoError(t, err)
  1255  
  1256  		if expectSegment.Head != nil {
  1257  			expectHead = expectSegment.Head.Bytes()
  1258  		}
  1259  		if expectSegment.Tail != nil {
  1260  			expectTail = expectSegment.Tail.Bytes()
  1261  		}
  1262  
  1263  		assert.Equal(t, expectHead, seg.Merged.Head)
  1264  		assert.Equal(t, expectTail, seg.Merged.Tail)
  1265  	}
  1266  }
  1267  
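        // TestServiceFetchBlocksRawIsOverloaded verifies that FetchBlocksRaw returns errServerIsOverloaded when the database reports it is overloaded.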
  1268  func TestServiceFetchBlocksRawIsOverloaded(t *testing.T) {
  1269  	ctrl := xtest.NewController(t)
  1270  	defer ctrl.Finish()
  1271  
  1272  	nsID := "metrics"
  1273  	mockNs := storage.NewMockNamespace(ctrl)
  1274  	mockNs.EXPECT().Options().Return(testNamespaceOptions).AnyTimes()
  1275  	mockDB := storage.NewMockDatabase(ctrl)
  1276  	mockDB.EXPECT().Namespace(ident.NewIDMatcher(nsID)).Return(mockNs, true).AnyTimes()
  1277  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1278  	mockDB.EXPECT().IsOverloaded().Return(true)
  1279  
  1280  	var (
  1281  		service = NewService(mockDB, testTChannelThriftOptions).(*service)
  1282  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  1283  		ctx     = tchannelthrift.Context(tctx)
  1284  		start   = xtime.Now().Add(-2 * time.Hour)
  1285  		end     = start.Add(2 * time.Hour)
  1286  		enc     = testStorageOpts.EncoderPool().Get()
  1287  		ids     = [][]byte{[]byte("foo"), []byte("bar")}
  1288  	)
  1289  
  1290  	defer ctx.Close()
  1291  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1292  	enc.Reset(start, 0, nil)
  1293  
  1294  	_, err := service.FetchBlocksRaw(tctx, &rpc.FetchBlocksRawRequest{
  1295  		NameSpace: []byte(nsID),
  1296  		Shard:     0,
  1297  		Elements: []*rpc.FetchBlocksRawRequestElement{
  1298  			{
  1299  				ID:     ids[0],
  1300  				Starts: []int64{int64(start)},
  1301  			},
  1302  			{
  1303  				ID:     ids[1],
  1304  				Starts: []int64{int64(start)},
  1305  			},
  1306  		},
  1307  	})
  1308  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  1309  }
  1310  
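        // TestServiceFetchBlocksRawDatabaseNotSet verifies that FetchBlocksRaw returns errDatabaseIsNotInitializedYet when no database has been set.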
  1311  func TestServiceFetchBlocksRawDatabaseNotSet(t *testing.T) {
  1312  	ctrl := xtest.NewController(t)
  1313  	defer ctrl.Finish()
  1314  
  1315  	var (
  1316  		nsID    = "metrics"
  1317  		service = NewService(nil, testTChannelThriftOptions).(*service)
  1318  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  1319  		ctx     = tchannelthrift.Context(tctx)
  1320  		start   = xtime.Now().Add(-2 * time.Hour)
  1321  		end     = start.Add(2 * time.Hour)
  1322  		enc     = testStorageOpts.EncoderPool().Get()
  1323  		ids     = [][]byte{[]byte("foo"), []byte("bar")}
  1324  	)
  1325  
  1326  	defer ctx.Close()
  1327  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1328  	enc.Reset(start, 0, nil)
  1329  
  1330  	_, err := service.FetchBlocksRaw(tctx, &rpc.FetchBlocksRawRequest{
  1331  		NameSpace: []byte(nsID),
  1332  		Shard:     0,
  1333  		Elements: []*rpc.FetchBlocksRawRequestElement{
  1334  			{
  1335  				ID:     ids[0],
  1336  				Starts: []int64{int64(start)},
  1337  			},
  1338  			{
  1339  				ID:     ids[1],
  1340  				Starts: []int64{int64(start)},
  1341  			},
  1342  		},
  1343  	})
  1344  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
  1345  }
  1346  
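        // TestServiceFetchBlocksMetadataEndpointV2Raw verifies that FetchBlocksMetadataRawV2 returns block metadata, encoded tags, and the next page token.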
  1347  func TestServiceFetchBlocksMetadataEndpointV2Raw(t *testing.T) {
  1348  	ctrl := xtest.NewController(t)
  1349  	defer ctrl.Finish()
  1350  
  1351  	// Set up mock db / service / context
  1352  	mockDB := storage.NewMockDatabase(ctrl)
  1353  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1354  	mockDB.EXPECT().IsOverloaded().Return(false)
  1355  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  1356  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  1357  	ctx := tchannelthrift.Context(tctx)
  1358  	defer ctx.Close()
  1359  
  1360  	// Configure constants / options
  1361  	var (
  1362  		now                = xtime.Now()
  1363  		start              = now.Truncate(time.Hour)
  1364  		end                = now.Add(4 * time.Hour).Truncate(time.Hour)
  1365  		limit              = int64(2)
  1366  		nextPageTokenBytes = []byte("page_next")
  1367  		includeSizes       = true
  1368  		includeChecksums   = true
  1369  		includeLastRead    = true
  1370  		nsID               = "metrics"
  1371  	)
  1372  
  1373  	// Prepare test data
  1374  	type testBlock struct {
  1375  		start    xtime.UnixNano
  1376  		size     int64
  1377  		checksum uint32
  1378  		lastRead xtime.UnixNano
  1379  	}
  1380  	series := map[string]struct {
  1381  		tags ident.Tags
  1382  		data []testBlock
  1383  	}{
  1384  		"foo": {
  1385  			// Check with tags
  1386  			tags: ident.NewTags(
  1387  				ident.StringTag("aaa", "bbb"),
  1388  				ident.StringTag("ccc", "ddd"),
  1389  			),
  1390  			data: []testBlock{
  1391  				{start.Add(0 * time.Hour), 16, 111, xtime.Now().Add(-time.Minute)},
  1392  				{start.Add(2 * time.Hour), 32, 222, 0},
  1393  			},
  1394  		},
  1395  		"bar": {
  1396  			// And without tags
  1397  			tags: ident.Tags{},
  1398  			data: []testBlock{
  1399  				{start.Add(0 * time.Hour), 32, 222, 0},
  1400  				{start.Add(2 * time.Hour), 64, 333, xtime.Now().Add(-time.Minute)},
  1401  			},
  1402  		},
  1403  	}
  1404  	ids := make([][]byte, 0, len(series))
  1405  	mockResult := block.NewFetchBlocksMetadataResults()
  1406  	numBlocks := 0
  1407  	for id, s := range series {
  1408  		ids = append(ids, []byte(id))
  1409  		blocks := block.NewFetchBlockMetadataResults()
  1410  		metadata := block.NewFetchBlocksMetadataResult(ident.StringID(id),
  1411  			ident.NewTagsIterator(s.tags), blocks)
  1412  		for _, v := range s.data {
  1413  			numBlocks++
  1414  			entry := v
  1415  			blocks.Add(block.FetchBlockMetadataResult{
  1416  				Start:    entry.start,
  1417  				Size:     entry.size,
  1418  				Checksum: &entry.checksum,
  1419  				LastRead: entry.lastRead,
  1420  				Err:      nil,
  1421  			})
  1422  		}
  1423  		mockResult.Add(metadata)
  1424  	}
  1425  
  1426  	// Set up db expectations based on test data
  1427  	opts := block.FetchBlocksMetadataOptions{
  1428  		IncludeSizes:     includeSizes,
  1429  		IncludeChecksums: includeChecksums,
  1430  		IncludeLastRead:  includeLastRead,
  1431  	}
  1432  	mockDB.EXPECT().
  1433  		FetchBlocksMetadataV2(ctx, ident.NewIDMatcher(nsID), uint32(0), start, end,
  1434  			limit, nil, opts).
  1435  		Return(mockResult, nextPageTokenBytes, nil)
  1436  
  1437  	// Run RPC method
  1438  	r, err := service.FetchBlocksMetadataRawV2(tctx, &rpc.FetchBlocksMetadataRawV2Request{
  1439  		NameSpace:        []byte(nsID),
  1440  		Shard:            0,
  1441  		RangeStart:       int64(start),
  1442  		RangeEnd:         int64(end),
  1443  		Limit:            limit,
  1444  		PageToken:        nil,
  1445  		IncludeSizes:     &includeSizes,
  1446  		IncludeChecksums: &includeChecksums,
  1447  		IncludeLastRead:  &includeLastRead,
  1448  	})
  1449  	require.NoError(t, err)
  1450  
  1451  	// Assert response looks OK
  1452  	require.Equal(t, numBlocks, len(r.Elements))
  1453  	require.Equal(t, nextPageTokenBytes, r.NextPageToken)
  1454  
  1455  	// Assert all blocks are present
  1456  	for _, block := range r.Elements {
  1457  		require.NotNil(t, block)
  1458  
  1459  		expectedBlocks := series[string(block.ID)]
  1460  
  1461  		if len(expectedBlocks.tags.Values()) == 0 {
  1462  			require.Equal(t, 0, len(block.EncodedTags))
  1463  		} else {
  1464  			id := ident.BinaryID(checked.NewBytes(block.ID, nil))
  1465  
  1466  			actualTags, err := conv.FromSeriesIDAndEncodedTags(id.Bytes(), block.EncodedTags)
  1467  			require.NoError(t, err)
  1468  
  1469  			expectedTags, err := conv.FromSeriesIDAndTags(id, expectedBlocks.tags)
  1470  			require.NoError(t, err)
  1471  
  1472  			require.True(t, expectedTags.Equal(actualTags))
  1473  		}
  1474  
  1475  		foundMatch := false
  1476  		for _, expectedBlock := range expectedBlocks.data {
  1477  			if expectedBlock.start != xtime.UnixNano(block.Start) {
  1478  				continue
  1479  			}
  1480  			foundMatch = true
  1481  			require.NotNil(t, block.Size)
  1482  			require.NotNil(t, block.Checksum)
  1483  			require.NotNil(t, block.LastRead)
  1484  		}
  1485  		require.True(t, foundMatch)
  1486  	}
  1487  }
  1488  
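        // TestServiceFetchBlocksMetadataEndpointV2RawIsOverloaded verifies that FetchBlocksMetadataRawV2 returns errServerIsOverloaded when the database reports it is overloaded.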
  1489  func TestServiceFetchBlocksMetadataEndpointV2RawIsOverloaded(t *testing.T) {
  1490  	ctrl := xtest.NewController(t)
  1491  	defer ctrl.Finish()
  1492  
  1493  	// Set up mock db / service / context
  1494  	mockDB := storage.NewMockDatabase(ctrl)
  1495  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1496  	mockDB.EXPECT().IsOverloaded().Return(true)
  1497  
  1498  	// Configure constants / options
  1499  	var (
  1500  		service          = NewService(mockDB, testTChannelThriftOptions).(*service)
  1501  		tctx, _          = tchannelthrift.NewContext(time.Minute)
  1502  		ctx              = tchannelthrift.Context(tctx)
  1503  		now              = xtime.Now()
  1504  		start            = now.Truncate(time.Hour)
  1505  		end              = now.Add(4 * time.Hour).Truncate(time.Hour)
  1506  		limit            = int64(2)
  1507  		includeSizes     = true
  1508  		includeChecksums = true
  1509  		includeLastRead  = true
  1510  		nsID             = "metrics"
  1511  	)
  1512  
  1513  	defer ctx.Close()
  1514  
  1515  	// Run RPC method
  1516  	_, err := service.FetchBlocksMetadataRawV2(tctx, &rpc.FetchBlocksMetadataRawV2Request{
  1517  		NameSpace:        []byte(nsID),
  1518  		Shard:            0,
  1519  		RangeStart:       start.Seconds(),
  1520  		RangeEnd:         end.Seconds(),
  1521  		Limit:            limit,
  1522  		PageToken:        nil,
  1523  		IncludeSizes:     &includeSizes,
  1524  		IncludeChecksums: &includeChecksums,
  1525  		IncludeLastRead:  &includeLastRead,
  1526  	})
  1527  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  1528  }
  1529  
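        // TestServiceFetchBlocksMetadataEndpointV2RawDatabaseNotSet verifies that FetchBlocksMetadataRawV2 returns errDatabaseIsNotInitializedYet when no database has been set.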
  1530  func TestServiceFetchBlocksMetadataEndpointV2RawDatabaseNotSet(t *testing.T) {
  1531  	ctrl := xtest.NewController(t)
  1532  	defer ctrl.Finish()
  1533  
  1534  	// Configure constants / options
  1535  	var (
  1536  		service          = NewService(nil, testTChannelThriftOptions).(*service)
  1537  		tctx, _          = tchannelthrift.NewContext(time.Minute)
  1538  		ctx              = tchannelthrift.Context(tctx)
  1539  		now              = xtime.Now()
  1540  		start            = now.Truncate(time.Hour)
  1541  		end              = now.Add(4 * time.Hour).Truncate(time.Hour)
  1542  		limit            = int64(2)
  1543  		includeSizes     = true
  1544  		includeChecksums = true
  1545  		includeLastRead  = true
  1546  		nsID             = "metrics"
  1547  	)
  1548  
  1549  	defer ctx.Close()
  1550  
  1551  	// Run RPC method
  1552  	_, err := service.FetchBlocksMetadataRawV2(tctx, &rpc.FetchBlocksMetadataRawV2Request{
  1553  		NameSpace:        []byte(nsID),
  1554  		Shard:            0,
  1555  		RangeStart:       int64(start),
  1556  		RangeEnd:         int64(end),
  1557  		Limit:            limit,
  1558  		PageToken:        nil,
  1559  		IncludeSizes:     &includeSizes,
  1560  		IncludeChecksums: &includeChecksums,
  1561  		IncludeLastRead:  &includeLastRead,
  1562  	})
  1563  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
  1564  }
  1565  
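        // TestServiceFetchTagged exercises FetchTagged across the happy path, the series read limit, and canceled-context cases.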
  1566  //nolint:dupl
  1567  func TestServiceFetchTagged(t *testing.T) {
  1568  	testCases := []struct {
  1569  		name            string
  1570  		blocksReadLimit int64
  1571  		fetchErrMsg     string
  1572  		blockReadCancel bool
  1573  	}{
  1574  		{
  1575  			name: "happy path",
  1576  		},
  1577  		{
  1578  			name:            "block read limit",
  1579  			blocksReadLimit: 1,
  1580  			fetchErrMsg:     "query aborted due to limit",
  1581  		},
  1582  		{
  1583  			name:            "data read canceled",
  1584  			fetchErrMsg:     "context canceled",
  1585  			blockReadCancel: true,
  1586  		},
  1587  	}
  1588  
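        	// Run each case as a subtest with its own mock database and limits configuration.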
  1589  	for _, tc := range testCases {
  1590  		tc := tc
  1591  		t.Run(tc.name, func(t *testing.T) {
  1592  			ctrl := xtest.NewController(t)
  1593  			defer ctrl.Finish()
  1594  
  1595  			mockDB := storage.NewMockDatabase(ctrl)
  1596  			mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1597  			mockDB.EXPECT().IsOverloaded().Return(false)
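        			// Configure lookback limits; the disk series read limit comes from the test case.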
  1598  			limitsOpts := limits.NewOptions().
  1599  				SetInstrumentOptions(testTChannelThriftOptions.InstrumentOptions()).
  1600  				SetBytesReadLimitOpts(limits.DefaultLookbackLimitOptions()).
  1601  				SetDiskSeriesReadLimitOpts(limits.LookbackLimitOptions{
  1602  					Limit:    tc.blocksReadLimit,
  1603  					Lookback: time.Second * 1,
  1604  				}).
  1605  				SetDocsLimitOpts(limits.DefaultLookbackLimitOptions())
  1606  
  1607  			queryLimits, err := limits.NewQueryLimits(limitsOpts)
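        			// Feed the disk series read limit into the permits manager used for series reads.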
  1608  			permitOpts := permits.NewOptions().
  1609  				SetSeriesReadPermitsManager(permits.NewLookbackLimitPermitsManager(
  1610  					"disk-series-read",
  1611  					limitsOpts.DiskSeriesReadLimitOpts(),
  1612  					testTChannelThriftOptions.InstrumentOptions(),
  1613  					limitsOpts.SourceLoggerBuilder(),
  1614  				))
  1615  
  1616  			require.NoError(t, err)
        			// Use a per-test copy so the package-level options are not mutated across subtests.
  1617  			tchanOpts := testTChannelThriftOptions.
  1618  				SetQueryLimits(queryLimits).
  1619  				SetPermitsOptions(permitOpts)
  1620  
  1621  			service := NewService(mockDB, tchanOpts).(*service)
  1622  
  1623  			tctx, _ := tchannelthrift.NewContext(time.Minute)
  1624  			ctx := tchannelthrift.Context(tctx)
  1625  			defer ctx.Close()
  1626  
  1627  			mtr := mocktracer.New()
  1628  			sp := mtr.StartSpan("root")
  1629  			stdCtx := opentracing.ContextWithSpan(gocontext.Background(), sp)
  1630  			stdCtx, cancel := gocontext.WithCancel(stdCtx)
  1631  			defer cancel()
  1632  			ctx.SetGoContext(stdCtx)
  1633  
  1634  			start := xtime.Now().Add(-2 * time.Hour)
  1635  			end := start.Add(2 * time.Hour)
  1636  
  1637  			start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1638  
  1639  			nsID := "metrics"
  1640  
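        			// Two series ("foo" and "bar") with two datapoints each; their encoded streams
        			// are what the mocked ReadEncoded calls return.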
  1641  			streams := map[string]xio.SegmentReader{}
  1642  			seriesData := map[string][]struct {
  1643  				t xtime.UnixNano
  1644  				v float64
  1645  			}{
  1646  				"foo": {
  1647  					{start.Add(10 * time.Second), 1.0},
  1648  					{start.Add(20 * time.Second), 2.0},
  1649  				},
  1650  				"bar": {
  1651  					{start.Add(20 * time.Second), 3.0},
  1652  					{start.Add(30 * time.Second), 4.0},
  1653  				},
  1654  			}
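        			// Encode each series and expect a ReadEncoded call that returns the stream,
        			// canceling the context first if the test case requires it.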
  1655  			for id, s := range seriesData {
  1656  				enc := testStorageOpts.EncoderPool().Get()
  1657  				enc.Reset(start, 0, nil)
  1658  				for _, v := range s {
  1659  					dp := ts.Datapoint{
  1660  						TimestampNanos: v.t,
  1661  						Value:          v.v,
  1662  					}
  1663  					require.NoError(t, enc.Encode(dp, xtime.Second, nil))
  1664  				}
  1665  
  1666  				stream, _ := enc.Stream(ctx)
  1667  				streams[id] = stream
  1668  				mockDB.EXPECT().
  1669  					ReadEncoded(gomock.Any(), ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
  1670  					DoAndReturn(func(
  1671  						ctx context.Context,
  1672  						namespace ident.ID,
  1673  						id ident.ID,
  1674  						start, end xtime.UnixNano) (series.BlockReaderIter, error) {
  1675  						if tc.blockReadCancel {
  1676  							cancel()
  1677  						}
  1678  						return &series.FakeBlockReaderIter{
  1679  							Readers: [][]xio.BlockReader{{
  1680  								xio.BlockReader{
  1681  									SegmentReader: stream,
  1682  								},
  1683  							}},
  1684  						}, nil
  1685  					})
  1686  			}
  1687  
  1688  			req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
  1689  			require.NoError(t, err)
  1690  			qry := index.Query{Query: req}
  1691  
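        			// Index metadata for the two series returned by the mocked QueryIDs call.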
  1692  			md1 := doc.Metadata{
  1693  				ID: ident.BytesID("foo"),
  1694  				Fields: []doc.Field{
  1695  					{
  1696  						Name:  []byte("foo"),
  1697  						Value: []byte("bar"),
  1698  					},
  1699  					{
  1700  						Name:  []byte("baz"),
  1701  						Value: []byte("dxk"),
  1702  					},
  1703  				},
  1704  			}
  1705  			md2 := doc.Metadata{
  1706  				ID: ident.BytesID("bar"),
  1707  				Fields: []doc.Field{
  1708  					{
  1709  						Name:  []byte("foo"),
  1710  						Value: []byte("bar"),
  1711  					},
  1712  					{
  1713  						Name:  []byte("dzk"),
  1714  						Value: []byte("baz"),
  1715  					},
  1716  				},
  1717  			}
  1718  
  1719  			resMap := index.NewQueryResults(ident.StringID(nsID),
  1720  				index.QueryResultsOptions{}, testIndexOptions)
  1721  			resMap.Map().Set(md1.ID, doc.NewDocumentFromMetadata(md1))
  1722  			resMap.Map().Set(md2.ID, doc.NewDocumentFromMetadata(md2))
  1723  			var (
  1724  				seriesLimit int64 = 10
  1725  				docsLimit   int64 = 10
  1726  			)
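        			// Expect the index query and return both documents as an exhaustive result.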
  1727  			mockDB.EXPECT().QueryIDs(
  1728  				gomock.Any(),
  1729  				ident.NewIDMatcher(nsID),
  1730  				index.NewQueryMatcher(qry),
  1731  				index.QueryOptions{
  1732  					StartInclusive: start,
  1733  					EndExclusive:   end,
  1734  					SeriesLimit:    int(seriesLimit),
  1735  					DocsLimit:      int(docsLimit),
  1736  					Source:         []byte("foo"),
  1737  				}).Return(index.QueryResult{Results: resMap, Exhaustive: true}, nil)
  1738  
  1739  			startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
  1740  			require.NoError(t, err)
  1741  			endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
  1742  			require.NoError(t, err)
  1743  
  1744  			data, err := idx.Marshal(req)
  1745  			require.NoError(t, err)
  1746  			r, err := service.FetchTagged(tctx, &rpc.FetchTaggedRequest{
  1747  				NameSpace:   []byte(nsID),
  1748  				Query:       data,
  1749  				RangeStart:  startNanos,
  1750  				RangeEnd:    endNanos,
  1751  				FetchData:   true,
  1752  				SeriesLimit: &seriesLimit,
  1753  				DocsLimit:   &docsLimit,
  1754  				Source:      []byte("foo"),
  1755  			})
  1756  			if tc.fetchErrMsg != "" {
  1757  				require.Error(t, err)
  1758  				require.Contains(t, err.Error(), tc.fetchErrMsg)
  1759  				return
  1760  			}
  1761  			require.NoError(t, err)
  1762  
  1763  			// Sort results to make the test deterministic.
  1764  			sort.Slice(r.Elements, func(i, j int) bool {
  1765  				return bytes.Compare(r.Elements[i].ID, r.Elements[j].ID) < 0
  1766  			})
  1767  			ids := [][]byte{[]byte("bar"), []byte("foo")}
  1768  			require.Equal(t, len(ids), len(r.Elements))
  1769  			//nolint:dupl
  1770  			for i, id := range ids {
  1771  				elem := r.Elements[i]
  1772  				require.NotNil(t, elem)
  1773  
  1774  				assert.Nil(t, elem.Err)
  1775  				require.Equal(t, 1, len(elem.Segments))
  1776  
  1777  				seg := elem.Segments[0]
  1778  				require.NotNil(t, seg)
  1779  				require.NotNil(t, seg.Merged)
  1780  
  1781  				var expectHead, expectTail []byte
  1782  				expectSegment, err := streams[string(id)].Segment()
  1783  				require.NoError(t, err)
  1784  
  1785  				if expectSegment.Head != nil {
  1786  					expectHead = expectSegment.Head.Bytes()
  1787  				}
  1788  				if expectSegment.Tail != nil {
  1789  					expectTail = expectSegment.Tail.Bytes()
  1790  				}
  1791  
  1792  				assert.Equal(t, expectHead, seg.Merged.Head)
  1793  				assert.Equal(t, expectTail, seg.Merged.Tail)
  1794  			}
  1795  
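        			// Verify the tracing spans and the context values propagated by the endpoint.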
  1796  			sp.Finish()
  1797  			spans := mtr.FinishedSpans()
  1798  
  1799  			require.Len(t, spans, 2)
  1800  			assert.Equal(t, tracepoint.FetchTagged, spans[0].OperationName)
  1801  			assert.Equal(t, "root", spans[1].OperationName)
  1802  
  1803  			require.Equal(t, "FetchTagged",
  1804  				ctx.GoContext().Value(tchannelthrift.EndpointContextKey).(tchannelthrift.Endpoint).String())
  1805  			require.Equal(t, "foo",
  1806  				string(ctx.GoContext().Value(limits.SourceContextKey).([]byte)))
  1807  		})
  1808  	}
  1809  }
  1810  
  1811  func TestServiceFetchTaggedIsOverloaded(t *testing.T) {
  1812  	ctrl := xtest.NewController(t)
  1813  	defer ctrl.Finish()
  1814  
  1815  	mockDB := storage.NewMockDatabase(ctrl)
  1816  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1817  	mockDB.EXPECT().IsOverloaded().Return(true)
  1818  
  1819  	var (
  1820  		service = NewService(mockDB, testTChannelThriftOptions).(*service)
  1821  
  1822  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  1823  		ctx     = tchannelthrift.Context(tctx)
  1824  
  1825  		start = xtime.Now().Add(-2 * time.Hour)
  1826  		end   = start.Add(2 * time.Hour)
  1827  
  1828  		nsID = "metrics"
  1829  	)
  1830  
  1831  	defer ctx.Close()
  1832  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1833  
  1834  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
  1835  	require.NoError(t, err)
  1836  
  1837  	md1 := doc.Metadata{
  1838  		ID: ident.BytesID("foo"),
  1839  		Fields: []doc.Field{
  1840  			{
  1841  				Name:  []byte("foo"),
  1842  				Value: []byte("bar"),
  1843  			},
  1844  			{
  1845  				Name:  []byte("baz"),
  1846  				Value: []byte("dxk"),
  1847  			},
  1848  		},
  1849  	}
  1850  	md2 := doc.Metadata{
  1851  		ID: ident.BytesID("bar"),
  1852  		Fields: []doc.Field{
  1853  			{
  1854  				Name:  []byte("foo"),
  1855  				Value: []byte("bar"),
  1856  			},
  1857  			{
  1858  				Name:  []byte("dzk"),
  1859  				Value: []byte("baz"),
  1860  			},
  1861  		},
  1862  	}
  1863  
  1864  	resMap := index.NewQueryResults(ident.StringID(nsID),
  1865  		index.QueryResultsOptions{}, testIndexOptions)
  1866  	resMap.Map().Set(md1.ID, doc.NewDocumentFromMetadata(md1))
  1867  	resMap.Map().Set(md2.ID, doc.NewDocumentFromMetadata(md2))
  1868  
  1869  	startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
  1870  	require.NoError(t, err)
  1871  	endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
  1872  	require.NoError(t, err)
  1873  	var (
  1874  		seriesLimit int64 = 10
  1875  		docsLimit   int64 = 10
  1876  	)
  1877  	data, err := idx.Marshal(req)
  1878  	require.NoError(t, err)
  1879  	_, err = service.FetchTagged(tctx, &rpc.FetchTaggedRequest{
  1880  		NameSpace:   []byte(nsID),
  1881  		Query:       data,
  1882  		RangeStart:  startNanos,
  1883  		RangeEnd:    endNanos,
  1884  		FetchData:   true,
  1885  		SeriesLimit: &seriesLimit,
  1886  		DocsLimit:   &docsLimit,
  1887  	})
  1888  	require.Equal(t, convert.ToRPCError(tterrors.NewInternalError(errServerIsOverloaded)), err)
  1889  }
  1890  
  1891  func TestServiceFetchTaggedDatabaseNotSet(t *testing.T) {
  1892  	ctrl := xtest.NewController(t)
  1893  	defer ctrl.Finish()
  1894  
  1895  	var (
  1896  		service = NewService(nil, testTChannelThriftOptions).(*service)
  1897  
  1898  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  1899  		ctx     = tchannelthrift.Context(tctx)
  1900  
  1901  		start = xtime.Now().Add(-2 * time.Hour)
  1902  		end   = start.Add(2 * time.Hour)
  1903  
  1904  		nsID = "metrics"
  1905  	)
  1906  
  1907  	defer ctx.Close()
  1908  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1909  
  1910  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
  1911  	require.NoError(t, err)
  1912  
  1913  	startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
  1914  	require.NoError(t, err)
  1915  	endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
  1916  	require.NoError(t, err)
  1917  	var (
  1918  		seriesLimit int64 = 10
  1919  		docsLimit   int64 = 10
  1920  	)
  1921  	data, err := idx.Marshal(req)
  1922  	require.NoError(t, err)
  1923  
  1924  	_, err = service.FetchTagged(tctx, &rpc.FetchTaggedRequest{
  1925  		NameSpace:   []byte(nsID),
  1926  		Query:       data,
  1927  		RangeStart:  startNanos,
  1928  		RangeEnd:    endNanos,
  1929  		FetchData:   true,
  1930  		SeriesLimit: &seriesLimit,
  1931  		DocsLimit:   &docsLimit,
  1932  	})
  1933  	require.Equal(t, convert.ToRPCError(tterrors.NewInternalError(errDatabaseIsNotInitializedYet)), err)
  1934  }
  1935  
  1936  func TestServiceFetchTaggedNoData(t *testing.T) {
  1937  	ctrl := xtest.NewController(t)
  1938  	defer ctrl.Finish()
  1939  
  1940  	mockDB := storage.NewMockDatabase(ctrl)
  1941  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  1942  	mockDB.EXPECT().IsOverloaded().Return(false)
  1943  
  1944  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  1945  
  1946  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  1947  	ctx := tchannelthrift.Context(tctx)
  1948  	defer ctx.Close()
  1949  
  1950  	start := xtime.Now().Add(-2 * time.Hour)
  1951  	end := start.Add(2 * time.Hour)
  1952  
  1953  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  1954  	nsID := "metrics"
  1955  
  1956  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
  1957  	require.NoError(t, err)
  1958  	qry := index.Query{Query: req}
  1959  
  1960  	md1 := doc.Metadata{
  1961  		ID:     ident.BytesID("foo"),
  1962  		Fields: []doc.Field{},
  1963  	}
  1964  	md2 := doc.Metadata{
  1965  		ID:     ident.BytesID("bar"),
  1966  		Fields: []doc.Field{},
  1967  	}
  1968  
  1969  	resMap := index.NewQueryResults(ident.StringID(nsID),
  1970  		index.QueryResultsOptions{}, testIndexOptions)
  1971  	resMap.Map().Set(md1.ID, doc.NewDocumentFromMetadata(md1))
  1972  	resMap.Map().Set(md2.ID, doc.NewDocumentFromMetadata(md2))
  1973  	var (
  1974  		seriesLimit int64 = 10
  1975  		docsLimit   int64 = 10
  1976  	)
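        	// Expect the index query; FetchData is false so no series data is read.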
  1977  	mockDB.EXPECT().QueryIDs(
  1978  		ctx,
  1979  		ident.NewIDMatcher(nsID),
  1980  		index.NewQueryMatcher(qry),
  1981  		index.QueryOptions{
  1982  			StartInclusive: start,
  1983  			EndExclusive:   end,
  1984  			SeriesLimit:    int(seriesLimit),
  1985  			DocsLimit:      int(docsLimit),
  1986  		}).Return(index.QueryResult{Results: resMap, Exhaustive: true}, nil)
  1987  
  1988  	startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
  1989  	require.NoError(t, err)
  1990  	endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
  1991  	require.NoError(t, err)
  1992  
  1993  	data, err := idx.Marshal(req)
  1994  	require.NoError(t, err)
  1995  	r, err := service.FetchTagged(tctx, &rpc.FetchTaggedRequest{
  1996  		NameSpace:   []byte(nsID),
  1997  		Query:       data,
  1998  		RangeStart:  startNanos,
  1999  		RangeEnd:    endNanos,
  2000  		FetchData:   false,
  2001  		SeriesLimit: &seriesLimit,
  2002  		DocsLimit:   &docsLimit,
  2003  	})
  2004  	require.NoError(t, err)
  2005  
  2006  	// Sort results to make the test deterministic.
  2007  	sort.Slice(r.Elements, func(i, j int) bool {
  2008  		return bytes.Compare(r.Elements[i].ID, r.Elements[j].ID) < 0
  2009  	})
  2010  	ids := [][]byte{[]byte("bar"), []byte("foo")}
  2011  	require.Equal(t, len(ids), len(r.Elements))
  2012  	for i, id := range ids {
  2013  		elem := r.Elements[i]
  2014  		require.NotNil(t, elem)
  2015  		require.Nil(t, elem.Err)
  2016  		require.Equal(t, id, elem.ID)
  2017  	}
  2018  }
  2019  
  2020  func TestServiceFetchTaggedErrs(t *testing.T) {
  2021  	ctrl := xtest.NewController(t)
  2022  	defer ctrl.Finish()
  2023  
  2024  	mockDB := storage.NewMockDatabase(ctrl)
  2025  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2026  	mockDB.EXPECT().IsOverloaded().Return(false)
  2027  
  2028  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2029  
  2030  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2031  	ctx := tchannelthrift.Context(tctx)
  2032  	defer ctx.Close()
  2033  
  2034  	start := xtime.Now().Add(-2 * time.Hour)
  2035  	end := start.Add(2 * time.Hour)
  2036  
  2037  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  2038  	nsID := "metrics"
  2039  
  2040  	startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
  2041  	require.NoError(t, err)
  2042  	endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
  2043  	require.NoError(t, err)
  2044  	var (
  2045  		seriesLimit int64 = 10
  2046  		docsLimit   int64 = 10
  2047  	)
  2048  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
  2049  	require.NoError(t, err)
  2050  	data, err := idx.Marshal(req)
  2051  	require.NoError(t, err)
  2052  	qry := index.Query{Query: req}
  2053  
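        	// The index query fails, so FetchTagged should surface an error.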
  2054  	mockDB.EXPECT().QueryIDs(
  2055  		ctx,
  2056  		ident.NewIDMatcher(nsID),
  2057  		index.NewQueryMatcher(qry),
  2058  		index.QueryOptions{
  2059  			StartInclusive: start,
  2060  			EndExclusive:   end,
  2061  			SeriesLimit:    int(seriesLimit),
  2062  			DocsLimit:      int(docsLimit),
  2063  		}).Return(index.QueryResult{}, fmt.Errorf("random err"))
  2064  	_, err = service.FetchTagged(tctx, &rpc.FetchTaggedRequest{
  2065  		NameSpace:   []byte(nsID),
  2066  		Query:       data,
  2067  		RangeStart:  startNanos,
  2068  		RangeEnd:    endNanos,
  2069  		FetchData:   false,
  2070  		SeriesLimit: &seriesLimit,
  2071  		DocsLimit:   &docsLimit,
  2072  	})
  2073  	require.Error(t, err)
  2074  }
  2075  
  2076  func TestServiceFetchTaggedReturnOnFirstErr(t *testing.T) {
  2077  	ctrl := xtest.NewController(t)
  2078  	defer ctrl.Finish()
  2079  
  2080  	mockDB := storage.NewMockDatabase(ctrl)
  2081  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2082  	mockDB.EXPECT().IsOverloaded().Return(false)
  2083  
  2084  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2085  
  2086  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2087  	ctx := tchannelthrift.Context(tctx)
  2088  	defer ctx.Close()
  2089  
  2090  	mtr := mocktracer.New()
  2091  	sp := mtr.StartSpan("root")
  2092  	ctx.SetGoContext(opentracing.ContextWithSpan(gocontext.Background(), sp))
  2093  
  2094  	start := xtime.Now().Add(-2 * time.Hour)
  2095  	end := start.Add(2 * time.Hour)
  2096  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  2097  
  2098  	nsID := "metrics"
  2099  
  2100  	id := "foo"
  2101  	s := []struct {
  2102  		t xtime.UnixNano
  2103  		v float64
  2104  	}{
  2105  		{start.Add(10 * time.Second), 1.0},
  2106  		{start.Add(20 * time.Second), 2.0},
  2107  	}
  2108  	enc := testStorageOpts.EncoderPool().Get()
  2109  	enc.Reset(start, 0, nil)
  2110  	for _, v := range s {
  2111  		dp := ts.Datapoint{
  2112  			TimestampNanos: v.t,
  2113  			Value:          v.v,
  2114  		}
  2115  		require.NoError(t, enc.Encode(dp, xtime.Second, nil))
  2116  	}
  2117  
  2118  	stream, _ := enc.Stream(ctx)
  2119  	mockDB.EXPECT().
  2120  		ReadEncoded(gomock.Any(), ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
  2121  		Return(&series.FakeBlockReaderIter{
  2122  			Readers: [][]xio.BlockReader{{
  2123  				xio.BlockReader{
  2124  					SegmentReader: stream,
  2125  				},
  2126  			}},
  2127  		}, fmt.Errorf("random err")) // Return error that should trigger failure of the entire call
  2128  
  2129  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
  2130  	require.NoError(t, err)
  2131  	qry := index.Query{Query: req}
  2132  
  2133  	md1 := doc.Metadata{
  2134  		ID: ident.BytesID("foo"),
  2135  		Fields: []doc.Field{
  2136  			{
  2137  				Name:  []byte("foo"),
  2138  				Value: []byte("bar"),
  2139  			},
  2140  			{
  2141  				Name:  []byte("baz"),
  2142  				Value: []byte("dxk"),
  2143  			},
  2144  		},
  2145  	}
  2146  
  2147  	resMap := index.NewQueryResults(ident.StringID(nsID),
  2148  		index.QueryResultsOptions{}, testIndexOptions)
  2149  	resMap.Map().Set(md1.ID, doc.NewDocumentFromMetadata(md1))
  2150  	var (
  2151  		seriesLimit int64 = 10
  2152  		docsLimit   int64 = 10
  2153  	)
  2154  	mockDB.EXPECT().QueryIDs(
  2155  		gomock.Any(),
  2156  		ident.NewIDMatcher(nsID),
  2157  		index.NewQueryMatcher(qry),
  2158  		index.QueryOptions{
  2159  			StartInclusive: start,
  2160  			EndExclusive:   end,
  2161  			SeriesLimit:    int(seriesLimit),
  2162  			DocsLimit:      int(docsLimit),
  2163  		}).Return(index.QueryResult{Results: resMap, Exhaustive: true}, nil)
  2164  
  2165  	startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
  2166  	require.NoError(t, err)
  2167  	endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
  2168  	require.NoError(t, err)
  2169  
  2170  	data, err := idx.Marshal(req)
  2171  	require.NoError(t, err)
  2172  	_, err = service.FetchTagged(tctx, &rpc.FetchTaggedRequest{
  2173  		NameSpace:   []byte(nsID),
  2174  		Query:       data,
  2175  		RangeStart:  startNanos,
  2176  		RangeEnd:    endNanos,
  2177  		FetchData:   true,
  2178  		SeriesLimit: &seriesLimit,
  2179  		DocsLimit:   &docsLimit,
  2180  	})
  2181  	require.Error(t, err)
  2182  }
  2183  
  2184  func TestServiceAggregate(t *testing.T) {
  2185  	ctrl := xtest.NewController(t)
  2186  	defer ctrl.Finish()
  2187  
  2188  	mockDB := storage.NewMockDatabase(ctrl)
  2189  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2190  	mockDB.EXPECT().IsOverloaded().Return(false)
  2191  
  2192  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2193  
  2194  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2195  	ctx := tchannelthrift.Context(tctx)
  2196  	defer ctx.Close()
  2197  
  2198  	start := xtime.Now().Add(-2 * time.Hour)
  2199  	end := start.Add(2 * time.Hour)
  2200  
  2201  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  2202  	nsID := "metrics"
  2203  
  2204  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
  2205  	require.NoError(t, err)
  2206  	qry := index.Query{Query: req}
  2207  
  2208  	resMap := index.NewAggregateResults(ident.StringID(nsID),
  2209  		index.AggregateResultsOptions{}, testIndexOptions)
  2210  	resMap.Map().Set(ident.StringID("foo"), index.MustNewAggregateValues(testIndexOptions))
  2211  	resMap.Map().Set(ident.StringID("bar"), index.MustNewAggregateValues(testIndexOptions,
  2212  		ident.StringID("baz"), ident.StringID("barf")))
  2213  
  2214  	var (
  2215  		seriesLimit int64 = 10
  2216  		docsLimit   int64 = 10
  2217  	)
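        	// Expect an aggregate query over tag names and values, filtered to "foo" and "bar".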
  2218  	mockDB.EXPECT().AggregateQuery(
  2219  		ctx,
  2220  		ident.NewIDMatcher(nsID),
  2221  		index.NewQueryMatcher(qry),
  2222  		index.AggregationOptions{
  2223  			QueryOptions: index.QueryOptions{
  2224  				StartInclusive: start,
  2225  				EndExclusive:   end,
  2226  				SeriesLimit:    int(seriesLimit),
  2227  				DocsLimit:      int(docsLimit),
  2228  			},
  2229  			FieldFilter: index.AggregateFieldFilter{
  2230  				[]byte("foo"), []byte("bar"),
  2231  			},
  2232  			Type: index.AggregateTagNamesAndValues,
  2233  		}).Return(
  2234  		index.AggregateQueryResult{Results: resMap, Exhaustive: true}, nil)
  2235  
  2236  	startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
  2237  	require.NoError(t, err)
  2238  	endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
  2239  	require.NoError(t, err)
  2240  
  2241  	data, err := idx.Marshal(req)
  2242  	require.NoError(t, err)
  2243  	r, err := service.AggregateRaw(tctx, &rpc.AggregateQueryRawRequest{
  2244  		NameSpace:          []byte(nsID),
  2245  		Query:              data,
  2246  		RangeStart:         startNanos,
  2247  		RangeEnd:           endNanos,
  2248  		SeriesLimit:        &seriesLimit,
  2249  		DocsLimit:          &docsLimit,
  2250  		AggregateQueryType: rpc.AggregateQueryType_AGGREGATE_BY_TAG_NAME_VALUE,
  2251  		TagNameFilter: [][]byte{
  2252  			[]byte("foo"), []byte("bar"),
  2253  		},
  2254  	})
  2255  	require.NoError(t, err)
  2256  
  2257  	// Sort results to make the test deterministic.
  2258  	sort.Slice(r.Results, func(i, j int) bool {
  2259  		return bytes.Compare(r.Results[i].TagName, r.Results[j].TagName) < 0
  2260  	})
  2261  	require.Equal(t, 2, len(r.Results))
  2262  	require.Equal(t, "bar", string(r.Results[0].TagName))
  2263  	require.Equal(t, 2, len(r.Results[0].TagValues))
  2264  	sort.Slice(r.Results[0].TagValues, func(i, j int) bool {
  2265  		return bytes.Compare(
  2266  			r.Results[0].TagValues[i].TagValue, r.Results[0].TagValues[j].TagValue) < 0
  2267  	})
  2268  	require.Equal(t, "barf", string(r.Results[0].TagValues[0].TagValue))
  2269  	require.Equal(t, "baz", string(r.Results[0].TagValues[1].TagValue))
  2270  
  2271  	require.Equal(t, "foo", string(r.Results[1].TagName))
  2272  	require.Equal(t, 0, len(r.Results[1].TagValues))
  2273  
  2274  	require.Equal(t, "AggregateRaw",
  2275  		ctx.GoContext().Value(tchannelthrift.EndpointContextKey).(tchannelthrift.Endpoint).String())
  2276  }
  2277  
  2278  func TestServiceAggregateNameOnly(t *testing.T) {
  2279  	ctrl := xtest.NewController(t)
  2280  	defer ctrl.Finish()
  2281  
  2282  	mockDB := storage.NewMockDatabase(ctrl)
  2283  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2284  	mockDB.EXPECT().IsOverloaded().Return(false)
  2285  
  2286  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2287  
  2288  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2289  	ctx := tchannelthrift.Context(tctx)
  2290  	defer ctx.Close()
  2291  
  2292  	start := xtime.Now().Add(-2 * time.Hour)
  2293  	end := start.Add(2 * time.Hour)
  2294  
  2295  	start, end = start.Truncate(time.Second), end.Truncate(time.Second)
  2296  	nsID := "metrics"
  2297  
  2298  	req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
  2299  	require.NoError(t, err)
  2300  	qry := index.Query{Query: req}
  2301  
  2302  	resMap := index.NewAggregateResults(ident.StringID(nsID),
  2303  		index.AggregateResultsOptions{}, testIndexOptions)
  2304  	resMap.Map().Set(ident.StringID("foo"), index.AggregateValues{})
  2305  	resMap.Map().Set(ident.StringID("bar"), index.AggregateValues{})
  2306  	var (
  2307  		seriesLimit int64 = 10
  2308  		docsLimit   int64 = 10
  2309  	)
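        	// Expect an aggregate query over tag names only.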
  2310  	mockDB.EXPECT().AggregateQuery(
  2311  		ctx,
  2312  		ident.NewIDMatcher(nsID),
  2313  		index.NewQueryMatcher(qry),
  2314  		index.AggregationOptions{
  2315  			QueryOptions: index.QueryOptions{
  2316  				StartInclusive: start,
  2317  				EndExclusive:   end,
  2318  				SeriesLimit:    int(seriesLimit),
  2319  				DocsLimit:      int(docsLimit),
  2320  			},
  2321  			FieldFilter: index.AggregateFieldFilter{
  2322  				[]byte("foo"), []byte("bar"),
  2323  			},
  2324  			Type: index.AggregateTagNames,
  2325  		}).Return(
  2326  		index.AggregateQueryResult{Results: resMap, Exhaustive: true}, nil)
  2327  
  2328  	startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
  2329  	require.NoError(t, err)
  2330  	endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
  2331  	require.NoError(t, err)
  2332  
  2333  	data, err := idx.Marshal(req)
  2334  	require.NoError(t, err)
  2335  	r, err := service.AggregateRaw(tctx, &rpc.AggregateQueryRawRequest{
  2336  		NameSpace:          []byte(nsID),
  2337  		Query:              data,
  2338  		RangeStart:         startNanos,
  2339  		RangeEnd:           endNanos,
  2340  		SeriesLimit:        &seriesLimit,
  2341  		DocsLimit:          &docsLimit,
  2342  		AggregateQueryType: rpc.AggregateQueryType_AGGREGATE_BY_TAG_NAME,
  2343  		TagNameFilter: [][]byte{
  2344  			[]byte("foo"), []byte("bar"),
  2345  		},
  2346  	})
  2347  	require.NoError(t, err)
  2348  
  2349  	// Sort results to make the test deterministic.
  2350  	sort.Slice(r.Results, func(i, j int) bool {
  2351  		return bytes.Compare(r.Results[i].TagName, r.Results[j].TagName) < 0
  2352  	})
  2353  	require.Equal(t, 2, len(r.Results))
  2354  	require.Equal(t, "bar", string(r.Results[0].TagName))
  2355  	require.Equal(t, 0, len(r.Results[0].TagValues))
  2356  	require.Equal(t, "foo", string(r.Results[1].TagName))
  2357  	require.Equal(t, 0, len(r.Results[1].TagValues))
  2358  }
  2359  
  2360  func TestServiceWrite(t *testing.T) {
  2361  	ctrl := xtest.NewController(t)
  2362  	defer ctrl.Finish()
  2363  
  2364  	mockDB := storage.NewMockDatabase(ctrl)
  2365  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2366  
  2367  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2368  
  2369  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2370  	ctx := tchannelthrift.Context(tctx)
  2371  	defer ctx.Close()
  2372  
  2373  	nsID := "metrics"
  2374  
  2375  	id := "foo"
  2376  
  2377  	at := xtime.Now().Truncate(time.Second)
  2378  	value := 42.42
  2379  
  2380  	mockDB.EXPECT().
  2381  		Write(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), at, value,
  2382  			xtime.Second, nil).
  2383  		Return(nil)
  2384  
  2385  	mockDB.EXPECT().IsOverloaded().Return(false)
  2386  	err := service.Write(tctx, &rpc.WriteRequest{
  2387  		NameSpace: nsID,
  2388  		ID:        id,
  2389  		Datapoint: &rpc.Datapoint{
  2390  			Timestamp:         at.Seconds(),
  2391  			TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2392  			Value:             value,
  2393  		},
  2394  	})
  2395  	require.NoError(t, err)
  2396  }
  2397  
  2398  func TestServiceWriteOverloaded(t *testing.T) {
  2399  	ctrl := xtest.NewController(t)
  2400  	defer ctrl.Finish()
  2401  
  2402  	mockDB := storage.NewMockDatabase(ctrl)
  2403  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2404  
  2405  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2406  
  2407  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2408  	ctx := tchannelthrift.Context(tctx)
  2409  	defer ctx.Close()
  2410  
  2411  	mockDB.EXPECT().IsOverloaded().Return(true)
  2412  	err := service.Write(tctx, &rpc.WriteRequest{
  2413  		NameSpace: "metrics",
  2414  		ID:        "foo",
  2415  		Datapoint: &rpc.Datapoint{
  2416  			Timestamp:         time.Now().Unix(),
  2417  			TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2418  			Value:             42.42,
  2419  		},
  2420  	})
  2421  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  2422  }
  2423  
  2424  func TestServiceWriteDatabaseNotSet(t *testing.T) {
  2425  	ctrl := xtest.NewController(t)
  2426  	defer ctrl.Finish()
  2427  
  2428  	var (
  2429  		service = NewService(nil, testTChannelThriftOptions).(*service)
  2430  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  2431  		ctx     = tchannelthrift.Context(tctx)
  2432  	)
  2433  
  2434  	defer ctx.Close()
  2435  
  2436  	err := service.Write(tctx, &rpc.WriteRequest{
  2437  		NameSpace: "metrics",
  2438  		ID:        "foo",
  2439  		Datapoint: &rpc.Datapoint{
  2440  			Timestamp:         time.Now().Unix(),
  2441  			TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2442  			Value:             42.42,
  2443  		},
  2444  	})
  2445  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
  2446  }
  2447  
  2448  func TestServiceWriteTagged(t *testing.T) {
  2449  	ctrl := xtest.NewController(t)
  2450  	defer ctrl.Finish()
  2451  
  2452  	mockDB := storage.NewMockDatabase(ctrl)
  2453  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2454  
  2455  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2456  
  2457  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2458  	ctx := tchannelthrift.Context(tctx)
  2459  	defer ctx.Close()
  2460  
  2461  	var (
  2462  		nsID      = "metrics"
  2463  		id        = "foo"
  2464  		tagNames  = []string{"foo", "bar", "baz"}
  2465  		tagValues = []string{"cmon", "keep", "going"}
  2466  		at        = time.Now().Truncate(time.Second)
  2467  		value     = 42.42
  2468  	)
  2469  
  2470  	mockDB.EXPECT().WriteTagged(ctx,
  2471  		ident.NewIDMatcher(nsID),
  2472  		ident.NewIDMatcher(id),
  2473  		gomock.Any(),
  2474  		xtime.ToUnixNano(at), value, xtime.Second, nil,
  2475  	).Return(nil)
  2476  
  2477  	request := &rpc.WriteTaggedRequest{
  2478  		NameSpace: nsID,
  2479  		ID:        id,
  2480  		Datapoint: &rpc.Datapoint{
  2481  			Timestamp:         at.Unix(),
  2482  			TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2483  			Value:             value,
  2484  		},
  2485  		Tags: []*rpc.Tag{},
  2486  	}
  2487  
  2488  	for i := range tagNames {
  2489  		request.Tags = append(request.Tags, &rpc.Tag{
  2490  			Name:  tagNames[i],
  2491  			Value: tagValues[i],
  2492  		})
  2493  	}
  2494  	mockDB.EXPECT().IsOverloaded().Return(false)
  2495  	err := service.WriteTagged(tctx, request)
  2496  	require.NoError(t, err)
  2497  }
  2498  
  2499  func TestServiceWriteTaggedOverloaded(t *testing.T) {
  2500  	ctrl := xtest.NewController(t)
  2501  	defer ctrl.Finish()
  2502  
  2503  	mockDB := storage.NewMockDatabase(ctrl)
  2504  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2505  
  2506  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2507  
  2508  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2509  	ctx := tchannelthrift.Context(tctx)
  2510  	defer ctx.Close()
  2511  
  2512  	mockDB.EXPECT().IsOverloaded().Return(true)
  2513  	err := service.WriteTagged(tctx, &rpc.WriteTaggedRequest{
  2514  		NameSpace: "metrics",
  2515  		ID:        "foo",
  2516  		Datapoint: &rpc.Datapoint{
  2517  			Timestamp:         time.Now().Unix(),
  2518  			TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2519  			Value:             42.42,
  2520  		},
  2521  	})
  2522  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  2523  }
  2524  
  2525  func TestServiceWriteTaggedDatabaseNotSet(t *testing.T) {
  2526  	ctrl := xtest.NewController(t)
  2527  	defer ctrl.Finish()
  2528  
  2529  	var (
  2530  		service = NewService(nil, testTChannelThriftOptions).(*service)
  2531  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  2532  		ctx     = tchannelthrift.Context(tctx)
  2533  	)
  2534  	defer ctx.Close()
  2535  
  2536  	err := service.WriteTagged(tctx, &rpc.WriteTaggedRequest{
  2537  		NameSpace: "metrics",
  2538  		ID:        "foo",
  2539  		Datapoint: &rpc.Datapoint{
  2540  			Timestamp:         time.Now().Unix(),
  2541  			TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2542  			Value:             42.42,
  2543  		},
  2544  	})
  2545  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
  2546  }
  2547  
  2548  func TestServiceWriteBatchRaw(t *testing.T) {
  2549  	ctrl := xtest.NewController(t)
  2550  	defer ctrl.Finish()
  2551  
  2552  	mockDB := storage.NewMockDatabase(ctrl)
  2553  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2554  
  2555  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2556  
  2557  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2558  	ctx := tchannelthrift.Context(tctx)
  2559  	defer ctx.Close()
  2560  
  2561  	nsID := "metrics"
  2562  
  2563  	values := []struct {
  2564  		id string
  2565  		t  time.Time
  2566  		v  float64
  2567  	}{
  2568  		{"foo", time.Now().Truncate(time.Second), 12.34},
  2569  		{"bar", time.Now().Truncate(time.Second), 42.42},
  2570  	}
  2571  
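        	// Expect the service to obtain a batch writer for the namespace and then write the whole batch.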
  2572  	writeBatch := writes.NewWriteBatch(0, ident.StringID(nsID), nil)
  2573  	mockDB.EXPECT().
  2574  		BatchWriter(ident.NewIDMatcher(nsID), len(values)).
  2575  		Return(writeBatch, nil)
  2576  
  2577  	mockDB.EXPECT().
  2578  		WriteBatch(ctx, ident.NewIDMatcher(nsID), writeBatch, gomock.Any()).
  2579  		Return(nil)
  2580  
  2581  	var elements []*rpc.WriteBatchRawRequestElement
  2582  	for _, w := range values {
  2583  		elem := &rpc.WriteBatchRawRequestElement{
  2584  			ID: []byte(w.id),
  2585  			Datapoint: &rpc.Datapoint{
  2586  				Timestamp:         w.t.Unix(),
  2587  				TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2588  				Value:             w.v,
  2589  			},
  2590  		}
  2591  		elements = append(elements, elem)
  2592  	}
  2593  
  2594  	mockDB.EXPECT().IsOverloaded().Return(false)
  2595  	err := service.WriteBatchRaw(tctx, &rpc.WriteBatchRawRequest{
  2596  		NameSpace: []byte(nsID),
  2597  		Elements:  elements,
  2598  	})
  2599  	require.NoError(t, err)
  2600  }
  2601  
  2602  func TestServiceWriteBatchRawV2SingleNS(t *testing.T) {
  2603  	ctrl := xtest.NewController(t)
  2604  	defer ctrl.Finish()
  2605  
  2606  	mockDB := storage.NewMockDatabase(ctrl)
  2607  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2608  
  2609  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2610  
  2611  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2612  	ctx := tchannelthrift.Context(tctx)
  2613  	defer ctx.Close()
  2614  
  2615  	nsID := "metrics"
  2616  
  2617  	values := []struct {
  2618  		id string
  2619  		t  time.Time
  2620  		v  float64
  2621  	}{
  2622  		{"foo", time.Now().Truncate(time.Second), 12.34},
  2623  		{"bar", time.Now().Truncate(time.Second), 42.42},
  2624  	}
  2625  
  2626  	writeBatch := writes.NewWriteBatch(0, ident.StringID(nsID), nil)
  2627  	mockDB.EXPECT().
  2628  		BatchWriter(ident.NewIDMatcher(nsID), len(values)).
  2629  		Return(writeBatch, nil)
  2630  
  2631  	mockDB.EXPECT().
  2632  		WriteBatch(ctx, ident.NewIDMatcher(nsID), writeBatch, gomock.Any()).
  2633  		Return(nil)
  2634  
  2635  	var elements []*rpc.WriteBatchRawV2RequestElement
  2636  	for _, w := range values {
  2637  		elem := &rpc.WriteBatchRawV2RequestElement{
  2638  			NameSpace: 0,
  2639  			ID:        []byte(w.id),
  2640  			Datapoint: &rpc.Datapoint{
  2641  				Timestamp:         w.t.Unix(),
  2642  				TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2643  				Value:             w.v,
  2644  			},
  2645  		}
  2646  		elements = append(elements, elem)
  2647  	}
  2648  
  2649  	mockDB.EXPECT().IsOverloaded().Return(false)
  2650  	err := service.WriteBatchRawV2(tctx, &rpc.WriteBatchRawV2Request{
  2651  		NameSpaces: [][]byte{[]byte(nsID)},
  2652  		Elements:   elements,
  2653  	})
  2654  	require.NoError(t, err)
  2655  }
  2656  
  2657  func TestServiceWriteBatchRawV2MultiNS(t *testing.T) {
  2658  	ctrl := xtest.NewController(t)
  2659  	defer ctrl.Finish()
  2660  
  2661  	mockDB := storage.NewMockDatabase(ctrl)
  2662  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2663  
  2664  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2665  
  2666  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2667  	ctx := tchannelthrift.Context(tctx)
  2668  	defer ctx.Close()
  2669  
  2670  	var (
  2671  		nsID1 = "metrics"
  2672  		nsID2 = "more-metrics"
  2673  
  2674  		values = []struct {
  2675  			id string
  2676  			t  time.Time
  2677  			v  float64
  2678  		}{
  2679  			{"foo", time.Now().Truncate(time.Second), 12.34},
  2680  			{"bar", time.Now().Truncate(time.Second), 42.42},
  2681  		}
  2682  
  2683  		writeBatch1 = writes.NewWriteBatch(0, ident.StringID(nsID1), nil)
  2684  		writeBatch2 = writes.NewWriteBatch(0, ident.StringID(nsID2), nil)
  2685  	)
  2686  
  2687  	mockDB.EXPECT().
  2688  		BatchWriter(ident.NewIDMatcher(nsID1), len(values)*2).
  2689  		Return(writeBatch1, nil)
  2690  	mockDB.EXPECT().
  2691  		BatchWriter(ident.NewIDMatcher(nsID2), len(values)*2).
  2692  		Return(writeBatch2, nil)
  2693  
  2694  	mockDB.EXPECT().
  2695  		WriteBatch(ctx, ident.NewIDMatcher(nsID1), writeBatch1, gomock.Any()).
  2696  		Return(nil)
  2697  	mockDB.EXPECT().
  2698  		WriteBatch(ctx, ident.NewIDMatcher(nsID2), writeBatch2, gomock.Any()).
  2699  		Return(nil)
  2700  
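        	// Build elements for both namespaces; the NameSpace field is an index into the request's NameSpaces slice.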
  2701  	var elements []*rpc.WriteBatchRawV2RequestElement
  2702  	for nsIdx := range []string{nsID1, nsID2} {
  2703  		for _, w := range values {
  2704  			elem := &rpc.WriteBatchRawV2RequestElement{
  2705  				NameSpace: int64(nsIdx),
  2706  				ID:        []byte(w.id),
  2707  				Datapoint: &rpc.Datapoint{
  2708  					Timestamp:         w.t.Unix(),
  2709  					TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2710  					Value:             w.v,
  2711  				},
  2712  			}
  2713  			elements = append(elements, elem)
  2714  		}
  2715  	}
  2716  
  2717  	mockDB.EXPECT().IsOverloaded().Return(false)
  2718  	err := service.WriteBatchRawV2(tctx, &rpc.WriteBatchRawV2Request{
  2719  		NameSpaces: [][]byte{[]byte(nsID1), []byte(nsID2)},
  2720  		Elements:   elements,
  2721  	})
  2722  	require.NoError(t, err)
  2723  }
  2724  
  2725  func TestServiceWriteBatchRawOverloaded(t *testing.T) {
  2726  	ctrl := xtest.NewController(t)
  2727  	defer ctrl.Finish()
  2728  
  2729  	mockDB := storage.NewMockDatabase(ctrl)
  2730  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2731  
  2732  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  2733  
  2734  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2735  	ctx := tchannelthrift.Context(tctx)
  2736  	defer ctx.Close()
  2737  
  2738  	mockDB.EXPECT().IsOverloaded().Return(true)
  2739  	err := service.WriteBatchRaw(tctx, &rpc.WriteBatchRawRequest{
  2740  		NameSpace: []byte("metrics"),
  2741  	})
  2742  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  2743  }
  2744  
  2745  // TestServiceWriteBatchRawOverMaxOutstandingRequests tests that the WriteBatchRaw endpoint
  2746  // will reject requests if the number of outstanding write requests has hit the maximum.
  2747  func TestServiceWriteBatchRawOverMaxOutstandingRequests(t *testing.T) {
  2748  	ctrl := xtest.NewController(t)
  2749  	defer ctrl.Finish()
  2750  
  2751  	mockDB := storage.NewMockDatabase(ctrl)
  2752  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2753  
  2754  	tchanOpts := testTChannelThriftOptions.
  2755  		SetMaxOutstandingWriteRequests(1)
  2756  	service := NewService(mockDB, tchanOpts).(*service)
  2757  
  2758  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2759  	ctx := tchannelthrift.Context(tctx)
  2760  	defer ctx.Close()
  2761  
  2762  	nsID := "metrics"
  2763  
  2764  	values := []struct {
  2765  		id string
  2766  		t  time.Time
  2767  		v  float64
  2768  	}{
  2769  		{"foo", time.Now().Truncate(time.Second), 12.34},
  2770  		{"bar", time.Now().Truncate(time.Second), 42.42},
  2771  	}
  2772  
  2773  	var (
  2774  		testIsComplete       = make(chan struct{})
  2775  		requestIsOutstanding = make(chan struct{})
  2776  	)
  2777  	writeBatch := writes.NewWriteBatch(0, ident.StringID(nsID), nil)
  2778  	mockDB.EXPECT().
  2779  		BatchWriter(ident.NewIDMatcher(nsID), len(values)).
  2780  		Do(func(nsID ident.ID, numValues int) {
  2781  			// Signal that a request is now outstanding.
  2782  			close(requestIsOutstanding)
  2783  			// Wait for test to complete.
  2784  			<-testIsComplete
  2785  		}).
  2786  		Return(writeBatch, nil)
  2787  	mockDB.EXPECT().
  2788  		WriteBatch(ctx, ident.NewIDMatcher(nsID), writeBatch, gomock.Any()).
  2789  		Return(nil).
  2790  		// AnyTimes() so we don't have to add extra signaling to wait for the
  2791  		// async goroutine to complete.
  2792  		AnyTimes()
  2793  
  2794  	var elements []*rpc.WriteBatchRawRequestElement
  2795  	for _, w := range values {
  2796  		elem := &rpc.WriteBatchRawRequestElement{
  2797  			ID: []byte(w.id),
  2798  			Datapoint: &rpc.Datapoint{
  2799  				Timestamp:         w.t.Unix(),
  2800  				TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2801  				Value:             w.v,
  2802  			},
  2803  		}
  2804  		elements = append(elements, elem)
  2805  	}
  2806  
  2807  	mockDB.EXPECT().IsOverloaded().Return(false).AnyTimes()
  2808  
  2809  	// First request will hang until the test is over (so a request is outstanding).
  2810  	outstandingRequestIsComplete := make(chan struct{})
  2811  	go func() {
  2812  		_ = service.WriteBatchRaw(tctx, &rpc.WriteBatchRawRequest{
  2813  			NameSpace: []byte(nsID),
  2814  			Elements:  elements,
  2815  		})
  2816  		close(outstandingRequestIsComplete)
  2817  	}()
  2818  	<-requestIsOutstanding
  2819  
  2820  	// Second request should get an overloaded error since there is an outstanding request.
  2821  	err := service.WriteBatchRaw(tctx, &rpc.WriteBatchRawRequest{
  2822  		NameSpace: []byte(nsID),
  2823  		Elements:  elements,
  2824  	})
  2825  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  2826  	close(testIsComplete)
  2827  
  2828  	// Ensure the number of outstanding requests gets decremented at the end of the RPC.
  2829  	<-outstandingRequestIsComplete
  2830  	require.Equal(t, 0, service.state.numOutstandingWriteRPCs)
  2831  }
  2832  
  2833  func TestServiceWriteBatchRawDatabaseNotSet(t *testing.T) {
  2834  	ctrl := xtest.NewController(t)
  2835  	defer ctrl.Finish()
  2836  
  2837  	var (
  2838  		service = NewService(nil, testTChannelThriftOptions).(*service)
  2839  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  2840  		ctx     = tchannelthrift.Context(tctx)
  2841  	)
  2842  	defer ctx.Close()
  2843  
  2844  	err := service.WriteBatchRaw(tctx, &rpc.WriteBatchRawRequest{
  2845  		NameSpace: []byte("metrics"),
  2846  	})
  2847  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
  2848  }
  2849  
  2850  func TestServiceWriteTaggedBatchRaw(t *testing.T) {
  2851  	ctrl := xtest.NewController(t)
  2852  	defer ctrl.Finish()
  2853  
  2854  	mockDB := storage.NewMockDatabase(ctrl)
  2855  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2856  
  2857  	opts := tchannelthrift.NewOptions()
  2858  
  2859  	service := NewService(mockDB, opts).(*service)
  2860  
  2861  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2862  	ctx := tchannelthrift.Context(tctx)
  2863  	defer ctx.Close()
  2864  
  2865  	nsID := "metrics"
  2866  
  2867  	values := []struct {
  2868  		id        string
  2869  		tagEncode string
  2870  		t         time.Time
  2871  		v         float64
  2872  	}{
  2873  		{"foo", "a|b", time.Now().Truncate(time.Second), 12.34},
  2874  		{"bar", "c|dd", time.Now().Truncate(time.Second), 42.42},
  2875  	}
  2876  
  2877  	writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil)
  2878  	mockDB.EXPECT().
  2879  		BatchWriter(ident.NewIDMatcher(nsID), len(values)).
  2880  		Return(writeBatch, nil)
  2881  
  2882  	mockDB.EXPECT().
  2883  		WriteTaggedBatch(ctx, ident.NewIDMatcher(nsID), writeBatch, gomock.Any()).
  2884  		Return(nil)
  2885  
  2886  	var elements []*rpc.WriteTaggedBatchRawRequestElement
  2887  	for _, w := range values {
  2888  		elem := &rpc.WriteTaggedBatchRawRequestElement{
  2889  			ID:          []byte(w.id),
  2890  			EncodedTags: []byte(w.tagEncode),
  2891  			Datapoint: &rpc.Datapoint{
  2892  				Timestamp:         w.t.Unix(),
  2893  				TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2894  				Value:             w.v,
  2895  			},
  2896  		}
  2897  		elements = append(elements, elem)
  2898  	}
  2899  
  2900  	mockDB.EXPECT().IsOverloaded().Return(false)
  2901  	err := service.WriteTaggedBatchRaw(tctx, &rpc.WriteTaggedBatchRawRequest{
  2902  		NameSpace: []byte(nsID),
  2903  		Elements:  elements,
  2904  	})
  2905  	require.NoError(t, err)
  2906  }
  2907  
  2908  func TestServiceWriteTaggedBatchRawV2(t *testing.T) {
  2909  	ctrl := xtest.NewController(t)
  2910  	defer ctrl.Finish()
  2911  
  2912  	mockDB := storage.NewMockDatabase(ctrl)
  2913  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2914  
  2915  	opts := tchannelthrift.NewOptions()
  2916  
  2917  	service := NewService(mockDB, opts).(*service)
  2918  
  2919  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2920  	ctx := tchannelthrift.Context(tctx)
  2921  	defer ctx.Close()
  2922  
  2923  	nsID := "metrics"
  2924  
  2925  	values := []struct {
  2926  		id        string
  2927  		tagEncode string
  2928  		t         time.Time
  2929  		v         float64
  2930  	}{
  2931  		{"foo", "a|b", time.Now().Truncate(time.Second), 12.34},
  2932  		{"bar", "c|dd", time.Now().Truncate(time.Second), 42.42},
  2933  	}
  2934  
  2935  	writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil)
  2936  	mockDB.EXPECT().
  2937  		BatchWriter(ident.NewIDMatcher(nsID), len(values)).
  2938  		Return(writeBatch, nil)
  2939  
  2940  	mockDB.EXPECT().
  2941  		WriteTaggedBatch(ctx, ident.NewIDMatcher(nsID), writeBatch, gomock.Any()).
  2942  		Return(nil)
  2943  
  2944  	var elements []*rpc.WriteTaggedBatchRawV2RequestElement
  2945  	for _, w := range values {
  2946  		elem := &rpc.WriteTaggedBatchRawV2RequestElement{
  2947  			NameSpace:   0,
  2948  			ID:          []byte(w.id),
  2949  			EncodedTags: []byte(w.tagEncode),
  2950  			Datapoint: &rpc.Datapoint{
  2951  				Timestamp:         w.t.Unix(),
  2952  				TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  2953  				Value:             w.v,
  2954  			},
  2955  		}
  2956  		elements = append(elements, elem)
  2957  	}
  2958  
  2959  	mockDB.EXPECT().IsOverloaded().Return(false)
  2960  	err := service.WriteTaggedBatchRawV2(tctx, &rpc.WriteTaggedBatchRawV2Request{
  2961  		NameSpaces: [][]byte{[]byte(nsID)},
  2962  		Elements:   elements,
  2963  	})
  2964  	require.NoError(t, err)
  2965  }
  2966  
  2967  func TestServiceWriteTaggedBatchRawV2MultiNS(t *testing.T) {
  2968  	ctrl := xtest.NewController(t)
  2969  	defer ctrl.Finish()
  2970  
  2971  	mockDB := storage.NewMockDatabase(ctrl)
  2972  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  2973  
  2974  	opts := tchannelthrift.NewOptions()
  2975  
  2976  	service := NewService(mockDB, opts).(*service)
  2977  
  2978  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  2979  	ctx := tchannelthrift.Context(tctx)
  2980  	defer ctx.Close()
  2981  
  2982  	var (
  2983  		nsID1  = "metrics"
  2984  		nsID2  = "more-metrics"
  2985  		values = []struct {
  2986  			id        string
  2987  			tagEncode string
  2988  			t         time.Time
  2989  			v         float64
  2990  		}{
  2991  			{"foo", "a|b", time.Now().Truncate(time.Second), 12.34},
  2992  			{"bar", "c|dd", time.Now().Truncate(time.Second), 42.42},
  2993  		}
  2994  		writeBatch1 = writes.NewWriteBatch(len(values), ident.StringID(nsID1), nil)
  2995  		writeBatch2 = writes.NewWriteBatch(len(values), ident.StringID(nsID2), nil)
  2996  	)
  2997  
  2998  	mockDB.EXPECT().
  2999  		BatchWriter(ident.NewIDMatcher(nsID1), len(values)*2).
  3000  		Return(writeBatch1, nil)
  3001  	mockDB.EXPECT().
  3002  		BatchWriter(ident.NewIDMatcher(nsID2), len(values)*2).
  3003  		Return(writeBatch2, nil)
  3004  
  3005  	mockDB.EXPECT().
  3006  		WriteTaggedBatch(ctx, ident.NewIDMatcher(nsID1), writeBatch1, gomock.Any()).
  3007  		Return(nil)
  3008  	mockDB.EXPECT().
  3009  		WriteTaggedBatch(ctx, ident.NewIDMatcher(nsID2), writeBatch2, gomock.Any()).
  3010  		Return(nil)
  3011  
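        	// Build tagged elements for both namespaces; NameSpace indexes into the request's NameSpaces slice.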
  3012  	var elements []*rpc.WriteTaggedBatchRawV2RequestElement
  3013  	for nsIdx := range []string{nsID1, nsID2} {
  3014  		for _, w := range values {
  3015  			elem := &rpc.WriteTaggedBatchRawV2RequestElement{
  3016  				NameSpace:   int64(nsIdx),
  3017  				ID:          []byte(w.id),
  3018  				EncodedTags: []byte(w.tagEncode),
  3019  				Datapoint: &rpc.Datapoint{
  3020  					Timestamp:         w.t.Unix(),
  3021  					TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  3022  					Value:             w.v,
  3023  				},
  3024  			}
  3025  			elements = append(elements, elem)
  3026  		}
  3027  	}
  3028  
  3029  	mockDB.EXPECT().IsOverloaded().Return(false)
  3030  	err := service.WriteTaggedBatchRawV2(tctx, &rpc.WriteTaggedBatchRawV2Request{
  3031  		NameSpaces: [][]byte{[]byte(nsID1), []byte(nsID2)},
  3032  		Elements:   elements,
  3033  	})
  3034  	require.NoError(t, err)
  3035  }
  3036  
  3037  func TestServiceWriteTaggedBatchRawOverloaded(t *testing.T) {
  3038  	ctrl := xtest.NewController(t)
  3039  	defer ctrl.Finish()
  3040  
  3041  	mockDB := storage.NewMockDatabase(ctrl)
  3042  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  3043  
  3044  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  3045  
  3046  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  3047  	ctx := tchannelthrift.Context(tctx)
  3048  	defer ctx.Close()
  3049  
  3050  	mockDB.EXPECT().IsOverloaded().Return(true)
  3051  	err := service.WriteTaggedBatchRaw(tctx, &rpc.WriteTaggedBatchRawRequest{
  3052  		NameSpace: []byte("metrics"),
  3053  	})
  3054  	require.Equal(t, tterrors.NewInternalError(errServerIsOverloaded), err)
  3055  }
  3056  
  3057  func TestServiceWriteTaggedBatchRawDatabaseNotSet(t *testing.T) {
  3058  	ctrl := xtest.NewController(t)
  3059  	defer ctrl.Finish()
  3060  
  3061  	var (
  3062  		service = NewService(nil, testTChannelThriftOptions).(*service)
  3063  		tctx, _ = tchannelthrift.NewContext(time.Minute)
  3064  		ctx     = tchannelthrift.Context(tctx)
  3065  	)
  3066  	defer ctx.Close()
  3067  
  3068  	err := service.WriteTaggedBatchRaw(tctx, &rpc.WriteTaggedBatchRawRequest{
  3069  		NameSpace: []byte("metrics"),
  3070  	})
  3071  	require.Equal(t, tterrors.NewInternalError(errDatabaseIsNotInitializedYet), err)
  3072  }
  3073  
  3074  func TestServiceWriteTaggedBatchRawUnknownError(t *testing.T) {
  3075  	ctrl := xtest.NewController(t)
  3076  	defer ctrl.Finish()
  3077  
  3078  	mockDB := storage.NewMockDatabase(ctrl)
  3079  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  3080  
  3081  	opts := tchannelthrift.NewOptions()
  3082  
  3083  	service := NewService(mockDB, opts).(*service)
  3084  
  3085  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  3086  	ctx := tchannelthrift.Context(tctx)
  3087  	defer ctx.Close()
  3088  
  3089  	nsID := "metrics"
  3090  	unknownErr := fmt.Errorf("unknown-err")
  3091  	values := []struct {
  3092  		id        string
  3093  		tagEncode string
  3094  		t         time.Time
  3095  		v         float64
  3096  	}{
  3097  		{"foo", "a|b", time.Now().Truncate(time.Second), 12.34},
  3098  		{"bar", "c|dd", time.Now().Truncate(time.Second), 42.42},
  3099  	}
  3100  
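        	// BatchWriter fails, so the RPC should return the converted unknown error.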
  3101  	mockDB.EXPECT().
  3102  		BatchWriter(ident.NewIDMatcher(nsID), len(values)).
  3103  		Return(nil, unknownErr)
  3104  
  3105  	var elements []*rpc.WriteTaggedBatchRawRequestElement
  3106  	for _, w := range values {
  3107  		elem := &rpc.WriteTaggedBatchRawRequestElement{
  3108  			ID:          []byte(w.id),
  3109  			EncodedTags: []byte(w.tagEncode),
  3110  			Datapoint: &rpc.Datapoint{
  3111  				Timestamp:         w.t.Unix(),
  3112  				TimestampTimeType: rpc.TimeType_UNIX_SECONDS,
  3113  				Value:             w.v,
  3114  			},
  3115  		}
  3116  		elements = append(elements, elem)
  3117  	}
  3118  
  3119  	mockDB.EXPECT().IsOverloaded().Return(false)
  3120  	err := service.WriteTaggedBatchRaw(tctx, &rpc.WriteTaggedBatchRawRequest{
  3121  		NameSpace: []byte(nsID),
  3122  		Elements:  elements,
  3123  	})
  3124  	require.Error(t, err)
  3125  	require.Equal(t, convert.ToRPCError(unknownErr), err)
  3126  }
  3127  
  3128  func TestServiceRepair(t *testing.T) {
  3129  	ctrl := xtest.NewController(t)
  3130  	defer ctrl.Finish()
  3131  
  3132  	mockDB := storage.NewMockDatabase(ctrl)
  3133  	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
  3134  	mockDB.EXPECT().IsOverloaded().Return(false)
  3135  
  3136  	service := NewService(mockDB, testTChannelThriftOptions).(*service)
  3137  
  3138  	tctx, _ := tchannelthrift.NewContext(time.Minute)
  3139  	ctx := tchannelthrift.Context(tctx)
  3140  	defer ctx.Close()
  3141  
  3142  	mockDB.EXPECT().Repair().Return(nil)
  3143  
  3144  	err := service.Repair(tctx)
  3145  	require.NoError(t, err)
  3146  }
  3147  
func TestServiceTruncate(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	mockDB := storage.NewMockDatabase(ctrl)
	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
	mockDB.EXPECT().IsOverloaded().Return(false)

	service := NewService(mockDB, testTChannelThriftOptions).(*service)

	tctx, _ := tchannelthrift.NewContext(time.Minute)
	ctx := tchannelthrift.Context(tctx)
	defer ctx.Close()

	nsID := "metrics"

	truncated := int64(123)

	mockDB.EXPECT().Truncate(ident.NewIDMatcher(nsID)).Return(truncated, nil)

	r, err := service.Truncate(tctx, &rpc.TruncateRequest{NameSpace: []byte(nsID)})
	require.NoError(t, err)
	assert.Equal(t, truncated, r.NumSeries)
}

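// TestServiceSetPersistRateLimit verifies reading and toggling the persist
// rate limit through the runtime options manager.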
func TestServiceSetPersistRateLimit(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	runtimeOpts := runtime.NewOptions()
	runtimeOpts = runtimeOpts.SetPersistRateLimitOptions(
		runtimeOpts.PersistRateLimitOptions().
			SetLimitEnabled(false))
	runtimeOptsMgr := runtime.NewOptionsManager()
	require.NoError(t, runtimeOptsMgr.Update(runtimeOpts))
	opts := testStorageOpts.SetRuntimeOptionsManager(runtimeOptsMgr)

	mockDB := storage.NewMockDatabase(ctrl)
	mockDB.EXPECT().Options().Return(opts).AnyTimes()
	mockDB.EXPECT().IsOverloaded().Return(false).AnyTimes()

	service := NewService(mockDB, testTChannelThriftOptions).(*service)

	tctx, _ := tchannelthrift.NewContext(time.Minute)
	ctx := tchannelthrift.Context(tctx)
	defer ctx.Close()

	// The runtime options above start with the rate limit disabled.
	getResp, err := service.GetPersistRateLimit(tctx)
	require.NoError(t, err)
	assert.Equal(t, false, getResp.LimitEnabled)

	enable := true
	req := &rpc.NodeSetPersistRateLimitRequest{
		LimitEnabled: &enable,
	}
	setResp, err := service.SetPersistRateLimit(tctx, req)
	require.NoError(t, err)
	assert.Equal(t, true, setResp.LimitEnabled)
}

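// TestServiceSetWriteNewSeriesAsync verifies reading and updating the
// write-new-series-async runtime option via the service endpoints.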
func TestServiceSetWriteNewSeriesAsync(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	runtimeOpts := runtime.NewOptions().
		SetWriteNewSeriesAsync(false)
	runtimeOptsMgr := runtime.NewOptionsManager()
	require.NoError(t, runtimeOptsMgr.Update(runtimeOpts))
	opts := testStorageOpts.SetRuntimeOptionsManager(runtimeOptsMgr)

	mockDB := storage.NewMockDatabase(ctrl)
	mockDB.EXPECT().Options().Return(opts).AnyTimes()
	mockDB.EXPECT().IsOverloaded().Return(false).AnyTimes()

	service := NewService(mockDB, testTChannelThriftOptions).(*service)

	tctx, _ := tchannelthrift.NewContext(time.Minute)
	ctx := tchannelthrift.Context(tctx)
	defer ctx.Close()

	getResp, err := service.GetWriteNewSeriesAsync(tctx)
	require.NoError(t, err)
	assert.Equal(t, false, getResp.WriteNewSeriesAsync)

	req := &rpc.NodeSetWriteNewSeriesAsyncRequest{
		WriteNewSeriesAsync: true,
	}
	setResp, err := service.SetWriteNewSeriesAsync(tctx, req)
	require.NoError(t, err)
	assert.Equal(t, true, setResp.WriteNewSeriesAsync)
}

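// TestServiceSetWriteNewSeriesBackoffDuration verifies that the backoff
// duration is reported in milliseconds and that setting it in seconds is
// converted accordingly.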
func TestServiceSetWriteNewSeriesBackoffDuration(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	runtimeOpts := runtime.NewOptions().
		SetWriteNewSeriesBackoffDuration(3 * time.Millisecond)
	runtimeOptsMgr := runtime.NewOptionsManager()
	require.NoError(t, runtimeOptsMgr.Update(runtimeOpts))
	opts := testStorageOpts.SetRuntimeOptionsManager(runtimeOptsMgr)

	mockDB := storage.NewMockDatabase(ctrl)
	mockDB.EXPECT().Options().Return(opts).AnyTimes()
	mockDB.EXPECT().IsOverloaded().Return(false).AnyTimes()

	service := NewService(mockDB, testTChannelThriftOptions).(*service)

	tctx, _ := tchannelthrift.NewContext(time.Minute)
	ctx := tchannelthrift.Context(tctx)
	defer ctx.Close()

	// The configured 3ms backoff is reported in milliseconds.
	getResp, err := service.GetWriteNewSeriesBackoffDuration(tctx)
	require.NoError(t, err)
	assert.Equal(t, int64(3), getResp.WriteNewSeriesBackoffDuration)
	assert.Equal(t, rpc.TimeType_UNIX_MILLISECONDS, getResp.DurationType)

	// Setting a 1 second backoff should be echoed back as 1000 milliseconds.
	req := &rpc.NodeSetWriteNewSeriesBackoffDurationRequest{
		WriteNewSeriesBackoffDuration: 1,
		DurationType:                  rpc.TimeType_UNIX_SECONDS,
	}
	setResp, err := service.SetWriteNewSeriesBackoffDuration(tctx, req)
	require.NoError(t, err)
	assert.Equal(t, int64(1000), setResp.WriteNewSeriesBackoffDuration)
	assert.Equal(t, rpc.TimeType_UNIX_MILLISECONDS, setResp.DurationType)
}

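// TestServiceSetWriteNewSeriesLimitPerShardPerSecond verifies reading and
// updating the per-shard new-series insert limit.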
func TestServiceSetWriteNewSeriesLimitPerShardPerSecond(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	runtimeOpts := runtime.NewOptions().
		SetWriteNewSeriesLimitPerShardPerSecond(42)
	runtimeOptsMgr := runtime.NewOptionsManager()
	require.NoError(t, runtimeOptsMgr.Update(runtimeOpts))
	opts := testStorageOpts.SetRuntimeOptionsManager(runtimeOptsMgr)

	mockDB := storage.NewMockDatabase(ctrl)
	mockDB.EXPECT().Options().Return(opts).AnyTimes()
	mockDB.EXPECT().IsOverloaded().Return(false).AnyTimes()

	service := NewService(mockDB, testTChannelThriftOptions).(*service)

	tctx, _ := tchannelthrift.NewContext(time.Minute)
	ctx := tchannelthrift.Context(tctx)
	defer ctx.Close()

	getResp, err := service.GetWriteNewSeriesLimitPerShardPerSecond(tctx)
	require.NoError(t, err)
	assert.Equal(t, int64(42), getResp.WriteNewSeriesLimitPerShardPerSecond)

	req := &rpc.NodeSetWriteNewSeriesLimitPerShardPerSecondRequest{
		WriteNewSeriesLimitPerShardPerSecond: 84,
	}
	setResp, err := service.SetWriteNewSeriesLimitPerShardPerSecond(tctx, req)
	require.NoError(t, err)
	assert.Equal(t, int64(84), setResp.WriteNewSeriesLimitPerShardPerSecond)
}

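// TestServiceAggregateTiles verifies that AggregateTiles parses the request's
// time range and step, passes them to the database as AggregateTilesOptions,
// and returns the processed tile count.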
func TestServiceAggregateTiles(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	mockDB := storage.NewMockDatabase(ctrl)
	mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
	mockDB.EXPECT().IsOverloaded().Return(false)

	service := NewService(mockDB, testTChannelThriftOptions).(*service)

	tctx, _ := tchannelthrift.NewContext(time.Minute)
	ctx := tchannelthrift.Context(tctx)
	defer ctx.Close()

	start := xtime.Now().Truncate(time.Hour).Add(-1 * time.Hour)
	end := start.Add(time.Hour)

	start, end = start.Truncate(time.Second), end.Truncate(time.Second)

	step := "10m"
	stepDuration, err := time.ParseDuration(step)
	require.NoError(t, err)

	sourceNsID := "source"
	targetNsID := "target"

	// The request's range and step should be translated into
	// AggregateTilesOptions passed to the database.
	mockDB.EXPECT().AggregateTiles(
		ctx,
		ident.NewIDMatcher(sourceNsID),
		ident.NewIDMatcher(targetNsID),
		gomock.Any(),
	).DoAndReturn(func(gotCtx, gotSourceNsID, gotTargetNsID interface{}, opts storage.AggregateTilesOptions) (int64, error) {
		require.NotNil(t, opts)
		require.Equal(t, start, opts.Start)
		require.Equal(t, end, opts.End)
		require.Equal(t, stepDuration, opts.Step)
		require.NotNil(t, opts.InsOptions)
		return int64(4), nil
	})

	result, err := service.AggregateTiles(tctx, &rpc.AggregateTilesRequest{
		SourceNamespace: sourceNsID,
		TargetNamespace: targetNsID,
		RangeStart:      start.Seconds(),
		RangeEnd:        end.Seconds(),
		Step:            step,
		RangeType:       rpc.TimeType_UNIX_SECONDS,
	})
	require.NoError(t, err)
	assert.Equal(t, int64(4), result.ProcessedTileCount)
}