github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ts/db_test.go

     1  // Copyright 2015 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package ts
    12  
    13  import (
    14  	"context"
    15  	"fmt"
    16  	"math"
    17  	"reflect"
    18  	"strconv"
    19  	"strings"
    20  	"sync"
    21  	"testing"
    22  	"time"
    23  
    24  	"github.com/cockroachdb/cockroach/pkg/keys"
    25  	"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
    26  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    27  	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
    28  	"github.com/cockroachdb/cockroach/pkg/storage"
    29  	"github.com/cockroachdb/cockroach/pkg/testutils"
    30  	"github.com/cockroachdb/cockroach/pkg/testutils/localtestcluster"
    31  	"github.com/cockroachdb/cockroach/pkg/ts/testmodel"
    32  	"github.com/cockroachdb/cockroach/pkg/ts/tspb"
    33  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    34  	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
    35  	"github.com/cockroachdb/cockroach/pkg/util/log"
    36  	"github.com/cockroachdb/cockroach/pkg/util/mon"
    37  	"github.com/cockroachdb/cockroach/pkg/util/stop"
    38  	"github.com/cockroachdb/cockroach/pkg/util/tracing"
    39  	"github.com/cockroachdb/errors"
    40  	"github.com/kr/pretty"
    41  )
    42  
    43  // testModelRunner is a model-based testing structure used to verify that time
    44  // series data sent to the Cockroach time series DB is stored correctly.
    45  //
    46  // This structure maintains a single ts.DB instance which stores data in a
    47  // monolithic Cockroach Store. It additionally maintains a test-model, a fully
    48  // in-memory implementation of the CockroachDB storage and query system. The
    49  // test model is unoptimized and in-memory, making it easier to understand than
    50  // the distributed and highly-optimized system used by real queries. The model
    51  // is used to generate expected results for test cases automatically.
    52  //
    53  // Each test should send a series of commands to the testModelRunner. Commands
    54  // are dispatched to both the ts.DB instance and the test model. Queries are
    55  // executed against both, and the results should match exactly.
    56  //
     57  // In addition, the test model can be used to generate an expectation of the
    58  // on-disk layout in the ts.DB instance; the tests should periodically assert
    59  // that the expectation matches reality.
    60  //
    61  // Finally, testModelRunner provides a small number of sanity checks
    62  // (assertKeyCount) that ensure that the real data does not trivially match the
    63  // model due to an improperly constructed test case.
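         //
         // A typical test case drives the runner through runTestCaseMultipleFormats
         // (as the tests at the bottom of this file do); a minimal sketch, with an
         // illustrative metric name and values:
         //
         //	runTestCaseMultipleFormats(t, func(t *testing.T, tm testModelRunner) {
         //		tm.storeTimeSeriesData(Resolution10s, []tspb.TimeSeriesData{
         //			tsd("test.metric", "", tsdp(440000000000000000, 100)),
         //		})
         //		tm.assertKeyCount(1)
         //		tm.assertModelCorrect()
         //	})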
    64  type testModelRunner struct {
    65  	*localtestcluster.LocalTestCluster
    66  	t                 testing.TB
    67  	DB                *DB
    68  	model             *testmodel.ModelDB
    69  	workerMemMonitor  *mon.BytesMonitor
    70  	resultMemMonitor  *mon.BytesMonitor
    71  	queryMemoryBudget int64
     72  	// firstColumnarTimestamp maps a series' resolution-specific model key to the
     73  	// first timestamp at which columnar data was inserted into that series.
    74  	// This is used when computing the expected on-disk layout from the model.
    75  	firstColumnarTimestamp map[string]int64
    76  }
    77  
     78  // newTestModelRunner creates a new testModelRunner instance. The Start() method must
    79  // be called before using it.
    80  func newTestModelRunner(t *testing.T) testModelRunner {
    81  	st := cluster.MakeTestingClusterSettings()
    82  	workerMonitor := mon.MakeUnlimitedMonitor(
    83  		context.Background(),
    84  		"timeseries-test-worker",
    85  		mon.MemoryResource,
    86  		nil,
    87  		nil,
    88  		math.MaxInt64,
    89  		st,
    90  	)
    91  	resultMonitor := mon.MakeUnlimitedMonitor(
    92  		context.Background(),
    93  		"timeseries-test-result",
    94  		mon.MemoryResource,
    95  		nil,
    96  		nil,
    97  		math.MaxInt64,
    98  		st,
    99  	)
   100  	return testModelRunner{
   101  		t:                      t,
   102  		model:                  testmodel.NewModelDB(),
   103  		LocalTestCluster:       &localtestcluster.LocalTestCluster{},
   104  		workerMemMonitor:       &workerMonitor,
   105  		resultMemMonitor:       &resultMonitor,
   106  		queryMemoryBudget:      math.MaxInt64,
   107  		firstColumnarTimestamp: make(map[string]int64),
   108  	}
   109  }
   110  
   111  // Start constructs and starts the local test server and creates a
   112  // time series DB.
   113  func (tm *testModelRunner) Start() {
   114  	tm.LocalTestCluster.Start(tm.t, testutils.NewNodeTestBaseContext(),
   115  		kvcoord.InitFactoryForLocalTestCluster)
   116  	tm.DB = NewDB(tm.LocalTestCluster.DB, tm.Cfg.Settings)
   117  }
   118  
   119  // getActualData returns the actual value of all time series keys in the
   120  // underlying engine. Data is returned as a map of strings to roachpb.Values.
   121  func (tm *testModelRunner) getActualData() map[string]roachpb.Value {
    122  	// Scan over all time series keys stored in the engine.
   123  	startKey := keys.TimeseriesPrefix
   124  	endKey := startKey.PrefixEnd()
   125  	res, err := storage.MVCCScan(context.Background(), tm.Eng, startKey, endKey, tm.Clock.Now(), storage.MVCCScanOptions{})
   126  	if err != nil {
   127  		tm.t.Fatalf("error scanning TS data from engine: %s", err)
   128  	}
   129  
   130  	kvMap := make(map[string]roachpb.Value)
   131  	for _, kv := range res.KVs {
   132  		kvMap[string(kv.Key)] = kv.Value
   133  	}
   134  
   135  	return kvMap
   136  }
   137  
    138  // assertModelCorrect asserts that the model data being maintained by this
    139  // testModelRunner is equivalent to the actual time series data stored in the
   140  // engine. If the actual data does not match the model, this method will print
   141  // out detailed information about the differences between the two data sets.
   142  func (tm *testModelRunner) assertModelCorrect() {
   143  	tm.t.Helper()
   144  	actualData := tm.getActualData()
   145  	modelDisk := tm.getModelDiskLayout()
   146  	if a, e := actualData, modelDisk; !reflect.DeepEqual(a, e) {
   147  		for _, diff := range pretty.Diff(a, e) {
   148  			tm.t.Error(diff)
   149  		}
   150  	}
   151  }
   152  
   153  func (tm *testModelRunner) getModelDiskLayout() map[string]roachpb.Value {
   154  	result := make(map[string]roachpb.Value)
   155  	tm.model.VisitAllSeries(func(name, source string, data testmodel.DataSeries) (testmodel.DataSeries, bool) {
   156  		// For computing the expected disk layout, only consider resolution-specific
   157  		// series.
   158  		resolution, seriesName, valid := getResolutionFromKey(name)
   159  		if !valid {
   160  			return data, false
   161  		}
   162  
   163  		// The on-disk model discards all samples in each sample period except for
   164  		// the last one.
   165  		if !resolution.IsRollup() {
   166  			data = data.GroupByResolution(resolution.SampleDuration(), testmodel.AggregateLast)
   167  		}
   168  
   169  		// Depending on when column-based storage was activated, some slabs will
   170  		// be in row format and others in column format. Find the dividing line
   171  		// and generate two sets of slabs.
   172  		var allSlabs []roachpb.InternalTimeSeriesData
   173  		addSlabs := func(datapoints testmodel.DataSeries, columnar bool) {
   174  			tsdata := tspb.TimeSeriesData{
   175  				Name:       seriesName,
   176  				Source:     source,
   177  				Datapoints: datapoints,
   178  			}
   179  			// Convert rollup resolutions before converting to slabs.
   180  			var slabs []roachpb.InternalTimeSeriesData
   181  			var err error
   182  			if resolution.IsRollup() {
   183  				rollup := computeRollupsFromData(tsdata, resolution.SampleDuration())
   184  				slabs, err = rollup.toInternal(resolution.SlabDuration(), resolution.SampleDuration())
   185  			} else {
   186  				slabs, err = tsdata.ToInternal(resolution.SlabDuration(), resolution.SampleDuration(), columnar)
   187  			}
   188  			if err != nil {
   189  				tm.t.Fatalf("error converting testmodel data to internal format: %s", err.Error())
   190  			}
   191  			allSlabs = append(allSlabs, slabs...)
   192  		}
   193  
   194  		if resolution.IsRollup() {
   195  			addSlabs(data, true)
   196  		} else {
   197  			firstColumnTime, hasColumns := tm.firstColumnarTimestamp[name]
   198  			if !hasColumns {
   199  				addSlabs(data, false)
   200  			} else {
   201  				firstColumnTime = resolution.normalizeToSlab(firstColumnTime)
   202  				addSlabs(data.TimeSlice(math.MinInt64, firstColumnTime), false)
   203  				addSlabs(data.TimeSlice(firstColumnTime, math.MaxInt64), true)
   204  			}
   205  		}
   206  
   207  		for _, slab := range allSlabs {
   208  			key := MakeDataKey(seriesName, source, resolution, slab.StartTimestampNanos)
   209  			keyStr := string(key)
   210  			var val roachpb.Value
   211  			if err := val.SetProto(&slab); err != nil {
   212  				tm.t.Fatal(err)
   213  			}
   214  			result[keyStr] = val
   215  		}
   216  
   217  		return data, false
   218  	})
   219  
   220  	return result
   221  }
   222  
   223  // assertKeyCount asserts that the model contains the expected number of keys.
   224  // This is used to ensure that data is actually being generated in the test
   225  // model.
   226  func (tm *testModelRunner) assertKeyCount(expected int) {
   227  	tm.t.Helper()
   228  	if a, e := len(tm.getModelDiskLayout()), expected; a != e {
   229  		tm.t.Errorf("model data key count did not match expected value: %d != %d", a, e)
   230  	}
   231  }
   232  
   233  func (tm *testModelRunner) storeInModel(r Resolution, data tspb.TimeSeriesData) {
   234  	if !TimeseriesStorageEnabled.Get(&tm.Cfg.Settings.SV) {
   235  		return
   236  	}
   237  
   238  	key := resolutionModelKey(data.Name, r)
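         	// When writing in columnar format, remember the earliest timestamp at which
         	// columnar data was recorded for this series; getModelDiskLayout uses it to
         	// decide which slabs to render in row format and which in column format.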
   239  	if tm.DB.WriteColumnar() {
    240  		firstColumnar, ok := tm.firstColumnarTimestamp[key]
    241  		if candidate := data.Datapoints[0].TimestampNanos; !ok || candidate < firstColumnar {
   242  			tm.firstColumnarTimestamp[key] = candidate
   243  		}
   244  	}
   245  	tm.model.Record(key, data.Source, data.Datapoints)
   246  }
   247  
    248  // resolutionModelKey returns the key under which resolution-specific data is
    249  // stored in the test model.
   250  func resolutionModelKey(name string, r Resolution) string {
   251  	return fmt.Sprintf("@%d.%s", r, name)
   252  }
   253  
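         // getResolutionFromKey is the inverse of resolutionModelKey: it parses a model
         // key of the form "@<resolution>.<name>" and returns the resolution, the series
         // name, and whether the key had the expected format. For example, the key
         // "@1.test.metric" yields Resolution(1) and "test.metric".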
   254  func getResolutionFromKey(key string) (Resolution, string, bool) {
   255  	if len(key) < 3 || !strings.HasPrefix(key, "@") {
   256  		return 0, key, false
   257  	}
   258  
   259  	parts := strings.SplitN(key[1:], ".", 2)
   260  	if len(parts) != 2 {
   261  		return 0, key, false
   262  	}
   263  
   264  	val, err := strconv.ParseInt(parts[0], 10, 64)
   265  	if err != nil {
   266  		return 0, key, false
   267  	}
   268  
   269  	return Resolution(val), parts[1], true
   270  }
   271  
   272  // storeTimeSeriesData instructs the model to store the given time series data
   273  // in both the model and the system under test.
   274  func (tm *testModelRunner) storeTimeSeriesData(r Resolution, data []tspb.TimeSeriesData) {
   275  	// Store data in the system under test.
   276  	if r.IsRollup() {
   277  		// For rollup resolutions, compute the rollupData from the time series
   278  		// data and store the rollup data.
   279  		var rdata []rollupData
   280  		for _, d := range data {
   281  			rdata = append(rdata, computeRollupsFromData(d, r.SampleDuration()))
   282  		}
   283  		if err := tm.DB.storeRollup(context.Background(), r, rdata); err != nil {
   284  			tm.t.Fatalf("error storing time series rollups: %s", err)
   285  		}
   286  	} else {
   287  		if err := tm.DB.StoreData(context.Background(), r, data); err != nil {
   288  			tm.t.Fatalf("error storing time series data: %s", err)
   289  		}
   290  	}
   291  
    292  	// Store data in the model. Even for rollup resolutions we store the original
   293  	// data points in the model, with the expectation that queries will be
   294  	// identical to those based on rollups.
   295  	for _, d := range data {
   296  		tm.storeInModel(r, d)
   297  	}
   298  }
   299  
    300  // prune removes old data from both the system under test and the test model.
    301  // "nowNanos" represents the current time and is used to compute threshold
    302  // ages. Only the provided time series/resolution pairs are considered.
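         //
         // A rough usage sketch (the series name and resolution are illustrative):
         //
         //	tm.prune(tm.Clock.Now().WallTime, timeSeriesResolutionInfo{
         //		Name:       "test.metric",
         //		Resolution: Resolution10s,
         //	})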
   303  func (tm *testModelRunner) prune(nowNanos int64, timeSeries ...timeSeriesResolutionInfo) {
   304  	// Prune time series from the system under test.
   305  	if err := tm.DB.pruneTimeSeries(
   306  		context.Background(),
   307  		tm.LocalTestCluster.DB,
   308  		timeSeries,
   309  		hlc.Timestamp{
   310  			WallTime: nowNanos,
   311  			Logical:  0,
   312  		},
   313  	); err != nil {
   314  		tm.t.Fatalf("error pruning time series data: %s", err)
   315  	}
   316  
   317  	// Prune the appropriate resolution-specific series from the test model using
   318  	// VisitSeries.
   319  	thresholds := tm.DB.computeThresholds(nowNanos)
   320  	for _, ts := range timeSeries {
   321  		tm.model.VisitSeries(
   322  			resolutionModelKey(ts.Name, ts.Resolution),
   323  			func(name, source string, data testmodel.DataSeries) (testmodel.DataSeries, bool) {
   324  				pruned := data.TimeSlice(thresholds[ts.Resolution], math.MaxInt64)
   325  				if len(pruned) != len(data) {
   326  					return pruned, true
   327  				}
   328  				return data, false
   329  			},
   330  		)
   331  	}
   332  }
   333  
    334  // rollup rolls up old data in both the system under test and the test model.
    335  // "nowNanos" represents the current time and is used to compute threshold
    336  // ages. Only the provided time series/resolution pairs are considered.
   337  func (tm *testModelRunner) rollup(nowNanos int64, timeSeries ...timeSeriesResolutionInfo) {
   338  	// Rollup time series from the system under test.
   339  	qmc := MakeQueryMemoryContext(tm.workerMemMonitor, tm.resultMemMonitor, QueryMemoryOptions{
    340  		// Use an effectively unlimited memory budget.
   341  		BudgetBytes:             math.MaxInt64,
   342  		EstimatedSources:        1, // Not needed for rollups
   343  		InterpolationLimitNanos: 0,
   344  		Columnar:                tm.DB.WriteColumnar(),
   345  	})
   346  	tm.rollupWithMemoryContext(qmc, nowNanos, timeSeries...)
   347  }
   348  
   349  // rollupWithMemoryContext performs the rollup operation using a custom memory
    350  // context.
   351  func (tm *testModelRunner) rollupWithMemoryContext(
   352  	qmc QueryMemoryContext, nowNanos int64, timeSeries ...timeSeriesResolutionInfo,
   353  ) {
   354  	if err := tm.DB.rollupTimeSeries(
   355  		context.Background(),
   356  		timeSeries,
   357  		hlc.Timestamp{
   358  			WallTime: nowNanos,
   359  			Logical:  0,
   360  		},
   361  		qmc,
   362  	); err != nil {
   363  		tm.t.Fatalf("error rolling up time series data: %s", err)
   364  	}
   365  
    366  	// Record data that is old enough to be rolled up into the appropriate
    367  	// target-resolution series in the test model, using VisitSeries.
   368  	thresholds := tm.DB.computeThresholds(nowNanos)
   369  	for _, ts := range timeSeries {
    370  		// Track any data in the original resolution which is older than the rollup
    371  		// threshold - it will be recorded into the rollup resolution.
   372  		type sourceDataPair struct {
   373  			source string
   374  			data   testmodel.DataSeries
   375  		}
   376  		var toRecord []sourceDataPair
   377  
    378  		// Visit each data series for the given name and resolution (there may be
    379  		// multiple sources). Collect any data older than the rollup threshold so
    380  		// that it can be recorded into the target rollup resolution; the original
    381  		// series itself is left unmodified here.
   382  		tm.model.VisitSeries(
   383  			resolutionModelKey(ts.Name, ts.Resolution),
   384  			func(name, source string, data testmodel.DataSeries) (testmodel.DataSeries, bool) {
   385  				if rollupData := data.TimeSlice(0, thresholds[ts.Resolution]); len(rollupData) > 0 {
   386  					toRecord = append(toRecord, sourceDataPair{
   387  						source: source,
   388  						data:   rollupData,
   389  					})
   390  				}
   391  				return data, false
   392  			},
   393  		)
   394  		for _, data := range toRecord {
   395  			targetResolution, _ := ts.Resolution.TargetRollupResolution()
   396  			tm.model.Record(
   397  				resolutionModelKey(ts.Name, targetResolution),
   398  				data.source,
   399  				data.data,
   400  			)
   401  		}
   402  	}
   403  }
   404  
    405  // maintain invokes the same operation used by the TS maintenance queue,
    406  // simulating its effects in the model at the same time.
   407  func (tm *testModelRunner) maintain(nowNanos int64) {
   408  	snap := tm.Store.Engine().NewSnapshot()
   409  	defer snap.Close()
   410  	if err := tm.DB.MaintainTimeSeries(
   411  		context.Background(),
   412  		snap,
   413  		roachpb.RKey(keys.TimeseriesPrefix),
   414  		roachpb.RKey(keys.TimeseriesKeyMax),
   415  		tm.LocalTestCluster.DB,
   416  		tm.workerMemMonitor,
   417  		math.MaxInt64,
   418  		hlc.Timestamp{
   419  			WallTime: nowNanos,
   420  			Logical:  0,
   421  		},
   422  	); err != nil {
   423  		tm.t.Fatalf("error maintaining time series data: %s", err)
   424  	}
   425  
    426  	// Prune (and, where applicable, roll up) the appropriate resolution-specific
    427  	// series in the test model using VisitAllSeries.
   428  	thresholds := tm.DB.computeThresholds(nowNanos)
   429  
    430  	// Track any data series that have been marked for rollup, and record them
    431  	// into the correct target resolution.
   432  	type rollupRecordingData struct {
   433  		name   string
   434  		source string
   435  		res    Resolution
   436  		data   testmodel.DataSeries
   437  	}
   438  	var toRecord []rollupRecordingData
   439  
   440  	// Visit each data series in the model, pruning and computing rollups.
   441  	tm.model.VisitAllSeries(
   442  		func(name, source string, data testmodel.DataSeries) (testmodel.DataSeries, bool) {
   443  			res, seriesName, ok := getResolutionFromKey(name)
   444  			if !ok {
   445  				return data, false
   446  			}
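         			// If this resolution has a rollup target and rollups are enabled, data
         			// older than the pruning threshold is recorded into the rollup resolution
         			// and then pruned; otherwise the old data is simply pruned.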
   447  			targetResolution, hasRollup := res.TargetRollupResolution()
   448  			if hasRollup && tm.DB.WriteRollups() {
   449  				pruned := data.TimeSlice(thresholds[res], math.MaxInt64)
   450  				if len(pruned) != len(data) {
   451  					toRecord = append(toRecord, rollupRecordingData{
   452  						name:   seriesName,
   453  						source: source,
   454  						res:    targetResolution,
   455  						data:   data.TimeSlice(0, thresholds[res]),
   456  					})
   457  					return pruned, true
   458  				}
   459  			} else if !hasRollup || !tm.DB.WriteRollups() {
   460  				pruned := data.TimeSlice(thresholds[res], math.MaxInt64)
   461  				if len(pruned) != len(data) {
   462  					return pruned, true
   463  				}
   464  			}
   465  			return data, false
   466  		},
   467  	)
   468  	for _, data := range toRecord {
   469  		tm.model.Record(
   470  			resolutionModelKey(data.name, data.res),
   471  			data.source,
   472  			data.data,
   473  		)
   474  	}
   475  }
   476  
    477  // modelQuery encapsulates all of the parameters needed to execute a query,
    478  // along with some context for executing that query. This structure is a useful
    479  // abstraction for tests, which rely on default values for most query fields
    480  // even though every field is modified by at least one test.
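         //
         // A minimal usage sketch (the metric name, time bounds, and expected counts
         // are illustrative only):
         //
         //	q := tm.makeQuery("test.metric", Resolution10s, 0, 60000000000)
         //	q.setDownsampler(tspb.TimeSeriesQueryAggregator_AVG)
         //	q.assertSuccess(6, 1)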
   481  type modelQuery struct {
   482  	tspb.Query
   483  	QueryTimespan
   484  	QueryMemoryOptions
   485  	diskResolution   Resolution
   486  	workerMemMonitor *mon.BytesMonitor
   487  	resultMemMonitor *mon.BytesMonitor
   488  	modelRunner      *testModelRunner
   489  }
   490  
   491  // makeQuery creates a new modelQuery which executes using this testModelRunner.
   492  // The new query executes against the given named metric and diskResolution,
   493  // querying between the provided start and end bounds. Useful defaults are set
   494  // for all other fields.
   495  func (tm *testModelRunner) makeQuery(
   496  	name string, diskResolution Resolution, startNanos, endNanos int64,
   497  ) modelQuery {
   498  	currentEstimatedSources := tm.model.UniqueSourceCount()
   499  	if currentEstimatedSources == 0 {
   500  		currentEstimatedSources = 1
   501  	}
   502  
   503  	return modelQuery{
   504  		Query: tspb.Query{
   505  			Name: name,
   506  		},
   507  		QueryTimespan: QueryTimespan{
   508  			StartNanos:          startNanos,
   509  			EndNanos:            endNanos,
   510  			SampleDurationNanos: diskResolution.SampleDuration(),
   511  			NowNanos:            math.MaxInt64,
   512  		},
   513  		QueryMemoryOptions: QueryMemoryOptions{
    514  			// Use an effectively unlimited memory budget.
   515  			BudgetBytes:             math.MaxInt64,
   516  			EstimatedSources:        currentEstimatedSources,
   517  			InterpolationLimitNanos: 0,
   518  			Columnar:                tm.DB.WriteColumnar(),
   519  		},
   520  		diskResolution:   diskResolution,
   521  		workerMemMonitor: tm.workerMemMonitor,
   522  		resultMemMonitor: tm.resultMemMonitor,
   523  		modelRunner:      tm,
   524  	}
   525  }
   526  
   527  // setSourceAggregator sets the source aggregator of the query. This is a
   528  // convenience method to avoid having to call Enum().
   529  func (mq *modelQuery) setSourceAggregator(agg tspb.TimeSeriesQueryAggregator) {
   530  	mq.SourceAggregator = agg.Enum()
   531  }
   532  
   533  // setDownsampler sets the downsampler of the query. This is a convenience
   534  // method to avoid having to call Enum().
   535  func (mq *modelQuery) setDownsampler(agg tspb.TimeSeriesQueryAggregator) {
   536  	mq.Downsampler = agg.Enum()
   537  }
   538  
   539  // setDerivative sets the derivative function of the query. This is a
   540  // convenience method to avoid having to call Enum().
   541  func (mq *modelQuery) setDerivative(deriv tspb.TimeSeriesQueryDerivative) {
   542  	mq.Derivative = deriv.Enum()
   543  }
   544  
   545  // queryDB queries the actual database using the configured parameters of the
   546  // model query.
   547  func (mq *modelQuery) queryDB() ([]tspb.TimeSeriesDatapoint, []string, error) {
   548  	// Query the actual server.
   549  	memContext := MakeQueryMemoryContext(
   550  		mq.workerMemMonitor, mq.resultMemMonitor, mq.QueryMemoryOptions,
   551  	)
   552  	defer memContext.Close(context.Background())
   553  	return mq.modelRunner.DB.Query(
   554  		context.Background(), mq.Query, mq.diskResolution, mq.QueryTimespan, memContext,
   555  	)
   556  }
   557  
   558  func (mq *modelQuery) queryModel() testmodel.DataSeries {
   559  	var result testmodel.DataSeries
   560  	startTime := mq.StartNanos
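         	// If the disk resolution has a compatible rollup resolution, query the rollup
         	// series first; the disk-resolution query below then picks up at the timestamp
         	// of the last datapoint returned from the rollup series.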
   561  	if rollupResolution, ok := mq.diskResolution.TargetRollupResolution(); ok &&
   562  		mq.verifyDiskResolution(rollupResolution) == nil {
   563  		result = mq.modelRunner.model.Query(
   564  			resolutionModelKey(mq.Name, rollupResolution),
   565  			mq.Sources,
   566  			mq.GetDownsampler(),
   567  			mq.GetSourceAggregator(),
   568  			mq.GetDerivative(),
   569  			rollupResolution.SlabDuration(),
   570  			mq.SampleDurationNanos,
   571  			mq.StartNanos,
   572  			mq.EndNanos,
   573  			mq.InterpolationLimitNanos,
   574  			mq.NowNanos,
   575  		)
   576  		if len(result) > 0 {
   577  			startTime = result[len(result)-1].TimestampNanos
   578  		}
   579  	}
   580  	result = append(result, mq.modelRunner.model.Query(
   581  		resolutionModelKey(mq.Name, mq.diskResolution),
   582  		mq.Sources,
   583  		mq.GetDownsampler(),
   584  		mq.GetSourceAggregator(),
   585  		mq.GetDerivative(),
   586  		mq.diskResolution.SlabDuration(),
   587  		mq.SampleDurationNanos,
   588  		startTime,
   589  		mq.EndNanos,
   590  		mq.InterpolationLimitNanos,
   591  		mq.NowNanos,
   592  	)...)
   593  	return result
   594  }
   595  
   596  // assertSuccess runs the query against both the real database and the model
   597  // database, ensuring that the query succeeds and that the real result matches
   598  // the model result. The two supplied parameters are a form of sanity check,
   599  // ensuring that the query actually performed the expected work (to avoid a
   600  // situation where both the model and the real database return the same
   601  // unexpected result because the query was incorrectly constructed).
   602  func (mq *modelQuery) assertSuccess(expectedDatapointCount, expectedSourceCount int) {
   603  	mq.modelRunner.t.Helper()
   604  
   605  	// Query the real DB.
   606  	actualDatapoints, actualSources, err := mq.queryDB()
   607  	if err != nil {
   608  		mq.modelRunner.t.Fatal(err)
   609  	}
   610  
   611  	// Query the model.
   612  	modelDatapoints := mq.queryModel()
   613  	if a, e := testmodel.DataSeries(actualDatapoints), modelDatapoints; !testmodel.DataSeriesEquivalent(a, e) {
   614  		for _, diff := range pretty.Diff(a, e) {
   615  			mq.modelRunner.t.Error(diff)
   616  		}
   617  	}
   618  	if a, e := len(actualDatapoints), expectedDatapointCount; a != e {
   619  		mq.modelRunner.t.Logf("actual datapoints: %v", actualDatapoints)
   620  		mq.modelRunner.t.Logf("model datapoints: %v", modelDatapoints)
   621  		mq.modelRunner.t.Fatal(errors.Errorf("query got %d datapoints, wanted %d", a, e))
   622  	}
   623  	if a, e := len(actualSources), expectedSourceCount; a != e {
   624  		mq.modelRunner.t.Logf("actual sources: %v", actualSources)
   625  		mq.modelRunner.t.Fatal(errors.Errorf("query got %d sources, wanted %d", a, e))
   626  	}
   627  }
   628  
   629  // assertMatchesModel asserts that the results of the query are identical when
   630  // executed against the real database and the model. This is the same as
   631  // assertSuccess, but does not include the sanity checks for datapoint count and
   632  // source count. This method is intended for use in tests which are generated
   633  // procedurally.
   634  func (mq *modelQuery) assertMatchesModel() {
   635  	mq.modelRunner.t.Helper()
   636  	// Query the real DB.
   637  	actualDatapoints, _, err := mq.queryDB()
   638  	if err != nil {
   639  		mq.modelRunner.t.Fatal(err)
   640  	}
   641  
   642  	// Query the model.
   643  	modelDatapoints := mq.queryModel()
   644  	if a, e := testmodel.DataSeries(actualDatapoints), modelDatapoints; !testmodel.DataSeriesEquivalent(a, e) {
   645  		mq.modelRunner.t.Errorf("actual %v expected %v", a, e)
   646  		for _, diff := range pretty.Diff(a, e) {
   647  			mq.modelRunner.t.Error(diff)
   648  		}
   649  	}
   650  }
   651  
   652  // assertError runs the query against the real database and asserts that the
   653  // database returns an error. The error's message must match the supplied
   654  // string.
   655  func (mq *modelQuery) assertError(errString string) {
   656  	mq.modelRunner.t.Helper()
   657  	_, _, err := mq.queryDB()
   658  	if err == nil {
   659  		mq.modelRunner.t.Fatalf(
    660  			"query got no error, wanted error with message matching \"%s\"", errString,
   661  		)
   662  	}
   663  	if !testutils.IsError(err, errString) {
   664  		mq.modelRunner.t.Fatalf(
   665  			"query got error \"%s\", wanted error with message matching \"%s\"", err.Error(), errString,
   666  		)
   667  	}
   668  }
   669  
    670  // modelDataSource is used to create a mock DataSource. It returns a
    671  // deterministic set of data from GetTimeSeriesData, storing the returned data
    672  // in the model each time GetTimeSeriesData is called. Data is returned until
    673  // all sets are exhausted, at which point the supplied stop.Stopper is stopped.
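         //
         // A rough wiring sketch (see TestPollSource below for a complete example):
         //
         //	src := modelDataSource{
         //		model:    tm,
         //		r:        Resolution10s,
         //		stopper:  stop.NewStopper(),
         //		datasets: [][]tspb.TimeSeriesData{ /* ... */ },
         //	}
         //	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
         //	tm.DB.PollSource(ambient, &src, time.Millisecond, Resolution10s, src.stopper)
         //	<-src.stopper.IsStopped()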
   674  type modelDataSource struct {
   675  	model       testModelRunner
   676  	datasets    [][]tspb.TimeSeriesData
   677  	r           Resolution
   678  	stopper     *stop.Stopper
   679  	calledCount int
   680  	once        sync.Once
   681  }
   682  
    683  // GetTimeSeriesData implements the DataSource interface, returning a
    684  // predefined set of TimeSeriesData on each call. It stores each TimeSeriesData
   685  // object in the test model before returning it. If all TimeSeriesData objects
   686  // have been returned, this method will stop the provided Stopper.
   687  func (mds *modelDataSource) GetTimeSeriesData() []tspb.TimeSeriesData {
   688  	if len(mds.datasets) == 0 {
   689  		// Stop on goroutine to prevent deadlock.
   690  		go mds.once.Do(func() { mds.stopper.Stop(context.Background()) })
   691  		return nil
   692  	}
   693  	mds.calledCount++
   694  	data := mds.datasets[0]
   695  	mds.datasets = mds.datasets[1:]
   696  
   697  	for _, d := range data {
   698  		mds.model.storeInModel(mds.r, d)
   699  	}
   700  	return data
   701  }
   702  
    703  // TestStoreTimeSeries is a simple test of the time series module, ensuring
    704  // that it stores time series data correctly.
   705  func TestStoreTimeSeries(t *testing.T) {
   706  	defer leaktest.AfterTest(t)()
   707  	runTestCaseMultipleFormats(t, func(t *testing.T, tm testModelRunner) {
   708  
   709  		// Basic storage operation: one data point.
   710  		tm.storeTimeSeriesData(Resolution10s, []tspb.TimeSeriesData{
   711  			tsd("test.metric", "",
   712  				tsdp(440000000000000000, 100),
   713  			),
   714  		})
   715  		tm.assertKeyCount(1)
   716  		tm.assertModelCorrect()
   717  
   718  		// Store data with different sources, and with multiple data points that
   719  		// aggregate into the same key.
   720  		tm.storeTimeSeriesData(Resolution10s, []tspb.TimeSeriesData{
   721  			tsd("test.metric.float", "cpu01",
   722  				tsdp(1428713843000000000, 100.0),
   723  				tsdp(1428713843000000001, 50.2),
   724  				tsdp(1428713843000000002, 90.9),
   725  			),
   726  		})
   727  		tm.storeTimeSeriesData(Resolution10s, []tspb.TimeSeriesData{
   728  			tsd("test.metric.float", "cpu02",
   729  				tsdp(1428713843000000000, 900.8),
   730  				tsdp(1428713843000000001, 30.12),
   731  				tsdp(1428713843000000002, 72.324),
   732  			),
   733  		})
   734  		tm.assertKeyCount(3)
   735  		tm.assertModelCorrect()
   736  
   737  		// A single storage operation that stores to multiple keys, including an
   738  		// existing key.
   739  		tm.storeTimeSeriesData(Resolution10s, []tspb.TimeSeriesData{
   740  			tsd("test.metric", "",
   741  				tsdp(440000000000000000, 200),
   742  				tsdp(450000000000000001, 1),
   743  				tsdp(460000000000000000, 777),
   744  			),
   745  		})
   746  		tm.assertKeyCount(5)
   747  		tm.assertModelCorrect()
   748  	})
   749  }
   750  
   751  // TestPollSource verifies that polled data sources are called as expected.
   752  func TestPollSource(t *testing.T) {
   753  	defer leaktest.AfterTest(t)()
   754  	runTestCaseMultipleFormats(t, func(t *testing.T, tm testModelRunner) {
   755  		testSource := modelDataSource{
   756  			model:   tm,
   757  			r:       Resolution10s,
   758  			stopper: stop.NewStopper(),
   759  			datasets: [][]tspb.TimeSeriesData{
   760  				{
   761  					tsd("test.metric.float", "cpu01",
   762  						tsdp(1428713843000000000, 100.0),
   763  						tsdp(1428713843000000001, 50.2),
   764  						tsdp(1428713843000000002, 90.9),
   765  					),
   766  					tsd("test.metric.float", "cpu02",
   767  						tsdp(1428713843000000000, 900.8),
   768  						tsdp(1428713843000000001, 30.12),
   769  						tsdp(1428713843000000002, 72.324),
   770  					),
   771  				},
   772  				{
   773  					tsd("test.metric", "",
   774  						tsdp(1428713843000000000, 100),
   775  					),
   776  				},
   777  			},
   778  		}
   779  
   780  		ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
   781  		tm.DB.PollSource(ambient, &testSource, time.Millisecond, Resolution10s, testSource.stopper)
   782  		<-testSource.stopper.IsStopped()
   783  		if a, e := testSource.calledCount, 2; a != e {
   784  			t.Errorf("testSource was called %d times, expected %d", a, e)
   785  		}
   786  		tm.assertKeyCount(3)
   787  		tm.assertModelCorrect()
   788  	})
   789  }
   790  
   791  // TestDisableStorage verifies that disabling timeseries storage via the cluster
   792  // setting works properly.
   793  func TestDisableStorage(t *testing.T) {
   794  	defer leaktest.AfterTest(t)()
   795  	runTestCaseMultipleFormats(t, func(t *testing.T, tm testModelRunner) {
   796  		TimeseriesStorageEnabled.Override(&tm.Cfg.Settings.SV, false)
   797  
   798  		// Basic storage operation: one data point.
   799  		tm.storeTimeSeriesData(Resolution10s, []tspb.TimeSeriesData{
   800  			tsd("test.metric", "",
   801  				tsdp(440000000000000000, 100),
   802  			),
   803  		})
   804  		tm.assertKeyCount(0)
   805  		tm.assertModelCorrect()
   806  
   807  		testSource := modelDataSource{
   808  			model:   tm,
   809  			r:       Resolution10s,
   810  			stopper: stop.NewStopper(),
   811  			datasets: [][]tspb.TimeSeriesData{
   812  				{
   813  					tsd("test.metric.float", "cpu01",
   814  						tsdp(1428713843000000000, 100.0),
   815  						tsdp(1428713843000000001, 50.2),
   816  						tsdp(1428713843000000002, 90.9),
   817  					),
   818  					tsd("test.metric.float", "cpu02",
   819  						tsdp(1428713843000000000, 900.8),
   820  						tsdp(1428713843000000001, 30.12),
   821  						tsdp(1428713843000000002, 72.324),
   822  					),
   823  				},
   824  				{
   825  					tsd("test.metric", "",
   826  						tsdp(1428713843000000000, 100),
   827  					),
   828  				},
   829  			},
   830  		}
   831  
   832  		ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
   833  		tm.DB.PollSource(ambient, &testSource, time.Millisecond, Resolution10s, testSource.stopper)
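         		// Storage is disabled, so the poller should never request data from the
         		// source; wait briefly to confirm nothing was polled, then stop the
         		// stopper ourselves.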
   834  		select {
   835  		case <-testSource.stopper.IsStopped():
    836  			t.Error("testSource data exhausted when polling should have been disabled")
   837  		case <-time.After(50 * time.Millisecond):
   838  			testSource.stopper.Stop(context.Background())
   839  		}
   840  		if a, e := testSource.calledCount, 0; a != e {
   841  			t.Errorf("testSource was called %d times, expected %d", a, e)
   842  		}
   843  		tm.assertKeyCount(0)
   844  		tm.assertModelCorrect()
   845  	})
   846  }
   847  
    848  // TestPruneThreshold verifies that PruneThreshold returns the correct result in nanoseconds.
   849  func TestPruneThreshold(t *testing.T) {
   850  	defer leaktest.AfterTest(t)()
   851  	runTestCaseMultipleFormats(t, func(t *testing.T, tm testModelRunner) {
   852  		db := NewDB(nil, tm.Cfg.Settings)
   853  		var expected int64
   854  		if db.WriteRollups() {
   855  			expected = resolution10sDefaultRollupThreshold.Nanoseconds()
   856  		} else {
   857  			expected = deprecatedResolution10sDefaultPruneThreshold.Nanoseconds()
   858  		}
   859  		result := db.PruneThreshold(Resolution10s)
   860  		if expected != result {
   861  			t.Errorf("prune threshold did not match expected value: %d != %d", expected, result)
   862  		}
   863  	})
   864  }