github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/aggregator/integration/integration_data.go (about)

     1  // Copyright (c) 2016 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package integration
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"sort"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/m3db/m3/src/aggregator/aggregation"
    31  	"github.com/m3db/m3/src/aggregator/aggregator"
    32  	maggregation "github.com/m3db/m3/src/metrics/aggregation"
    33  	"github.com/m3db/m3/src/metrics/metadata"
    34  	"github.com/m3db/m3/src/metrics/metric"
    35  	"github.com/m3db/m3/src/metrics/metric/aggregated"
    36  	metricid "github.com/m3db/m3/src/metrics/metric/id"
    37  	"github.com/m3db/m3/src/metrics/metric/unaggregated"
    38  	"github.com/m3db/m3/src/metrics/pipeline/applied"
    39  	"github.com/m3db/m3/src/metrics/policy"
    40  	xtime "github.com/m3db/m3/src/x/time"
    41  
    42  	"github.com/google/go-cmp/cmp"
    43  	"github.com/google/go-cmp/cmp/cmpopts"
    44  	"github.com/stretchr/testify/require"
    45  )
    46  
var (
	// testStagedMetadatas is the default two-pipeline staged metadata set:
	// a default-aggregation pipeline spanning two storage policies, plus a
	// Sum-only pipeline with a single storage policy.
	testStagedMetadatas = metadata.StagedMetadatas{
		{
			CutoverNanos: 0,
			Tombstoned:   false,
			Metadata: metadata.Metadata{
				Pipelines: []metadata.PipelineMetadata{
					{
						AggregationID: maggregation.DefaultID,
						StoragePolicies: []policy.StoragePolicy{
							policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour),
							policy.NewStoragePolicy(2*time.Second, xtime.Second, 6*time.Hour),
						},
					},
					{
						AggregationID: maggregation.MustCompressTypes(maggregation.Sum),
						StoragePolicies: []policy.StoragePolicy{
							policy.NewStoragePolicy(time.Second, xtime.Second, 2*time.Hour),
						},
					},
				},
			},
		},
	}
	// testStagedMetadatasWithCustomAggregation1 has a single Min-only pipeline
	// across two storage policies.
	testStagedMetadatasWithCustomAggregation1 = metadata.StagedMetadatas{
		{
			CutoverNanos: 0,
			Tombstoned:   false,
			Metadata: metadata.Metadata{
				Pipelines: []metadata.PipelineMetadata{
					{
						AggregationID: maggregation.MustCompressTypes(maggregation.Min),
						StoragePolicies: []policy.StoragePolicy{
							policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour),
							policy.NewStoragePolicy(2*time.Second, xtime.Second, 6*time.Hour),
						},
					},
				},
			},
		},
	}
	// testStagedMetadatasWithCustomAggregation2 has a single Min+Max pipeline
	// across two storage policies (one with a 24h retention).
	testStagedMetadatasWithCustomAggregation2 = metadata.StagedMetadatas{
		{
			CutoverNanos: 0,
			Tombstoned:   false,
			Metadata: metadata.Metadata{
				Pipelines: []metadata.PipelineMetadata{
					{
						AggregationID: maggregation.MustCompressTypes(maggregation.Min, maggregation.Max),
						StoragePolicies: []policy.StoragePolicy{
							policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour),
							policy.NewStoragePolicy(3*time.Second, xtime.Second, 24*time.Hour),
						},
					},
				},
			},
		},
	}
	// testUpdatedStagedMetadatas is a variant used to exercise metadata updates:
	// a Mean-only pipeline plus a default-aggregation pipeline.
	testUpdatedStagedMetadatas = metadata.StagedMetadatas{
		{
			CutoverNanos: 0,
			Tombstoned:   false,
			Metadata: metadata.Metadata{
				Pipelines: []metadata.PipelineMetadata{
					{
						AggregationID: maggregation.MustCompressTypes(maggregation.Mean),
						StoragePolicies: []policy.StoragePolicy{
							policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour),
							policy.NewStoragePolicy(3*time.Second, xtime.Second, 6*time.Hour),
						},
					},
					{
						AggregationID: maggregation.DefaultID,
						StoragePolicies: []policy.StoragePolicy{
							policy.NewStoragePolicy(2*time.Second, xtime.Second, 2*time.Hour),
						},
					},
				},
			},
		},
	}
	// testCmpOpts are the go-cmp options used when comparing expected and
	// actual results; StoragePolicy has unexported fields so it must be
	// explicitly allowed.
	testCmpOpts = []cmp.Option{
		cmpopts.EquateEmpty(),
		cmpopts.EquateNaNs(),
		cmp.AllowUnexported(policy.StoragePolicy{}),
	}
)
   134  
   135  func generateTestIDs(prefix string, numIDs int) []string {
   136  	ids := make([]string, numIDs)
   137  	for i := 0; i < numIDs; i++ {
   138  		ids[i] = fmt.Sprintf("%s%d", prefix, i)
   139  	}
   140  	return ids
   141  }
   142  
   143  func mustGenerateTestDataset(t *testing.T, opts datasetGenOpts) testDataset {
   144  	ds, err := generateTestDataset(opts)
   145  	require.NoError(t, err)
   146  	return ds
   147  }
   148  
   149  func generateTestDataset(opts datasetGenOpts) (testDataset, error) {
   150  	var (
   151  		testDataset []testData
   152  		intervalIdx int
   153  	)
   154  	for timestamp := opts.start; timestamp.Before(opts.stop); timestamp = timestamp.Add(opts.interval) {
   155  		metricWithMetadatas := make([]metricWithMetadataUnion, 0, len(opts.ids))
   156  		for i := 0; i < len(opts.ids); i++ {
   157  			var (
   158  				metricType = opts.typeFn(timestamp, i)
   159  				mu         metricUnion
   160  			)
   161  			switch opts.category {
   162  			case untimedMetric:
   163  				var err error
   164  				mu, err = generateTestUntimedMetric(metricType, opts.ids[i], xtime.ToUnixNano(timestamp), intervalIdx,
   165  					i, opts.valueGenOpts.untimed)
   166  				if err != nil {
   167  					return nil, err
   168  				}
   169  			case forwardedMetric:
   170  				mu = generateTestForwardedMetric(metricType, opts.ids[i], timestamp.UnixNano(), intervalIdx,
   171  					i, opts.valueGenOpts.forwarded)
   172  			case timedMetric:
   173  				mu = generateTestTimedMetric(metricType, opts.ids[i], timestamp.UnixNano(), intervalIdx,
   174  					i, opts.valueGenOpts.timed)
   175  			case passthroughMetric:
   176  				mu = generateTestPassthroughMetric(metricType, opts.ids[i], timestamp.UnixNano(), intervalIdx,
   177  					i, opts.valueGenOpts.passthrough)
   178  			default:
   179  				return nil, fmt.Errorf("unrecognized metric category: %v", opts.category)
   180  			}
   181  			metricWithMetadatas = append(metricWithMetadatas, metricWithMetadataUnion{
   182  				metric:   mu,
   183  				metadata: opts.metadataFn(i),
   184  			})
   185  		}
   186  		testDataset = append(testDataset, testData{
   187  			timestamp:           timestamp,
   188  			metricWithMetadatas: metricWithMetadatas,
   189  		})
   190  		intervalIdx++
   191  	}
   192  	return testDataset, nil
   193  }
   194  
   195  func generateTestUntimedMetric(
   196  	metricType metric.Type,
   197  	id string,
   198  	timestamp xtime.UnixNano,
   199  	intervalIdx, idIdx int,
   200  	valueGenOpts untimedValueGenOpts,
   201  ) (metricUnion, error) {
   202  	mu := metricUnion{category: untimedMetric}
   203  	annotation := generateAnnotation(metricType, idIdx)
   204  	switch metricType {
   205  	case metric.CounterType:
   206  		mu.untimed = unaggregated.MetricUnion{
   207  			Type:            metricType,
   208  			ID:              metricid.RawID(id),
   209  			CounterVal:      valueGenOpts.counterValueGenFn(intervalIdx, idIdx),
   210  			Annotation:      annotation,
   211  			ClientTimeNanos: timestamp,
   212  		}
   213  	case metric.TimerType:
   214  		mu.untimed = unaggregated.MetricUnion{
   215  			Type:            metricType,
   216  			ID:              metricid.RawID(id),
   217  			BatchTimerVal:   valueGenOpts.timerValueGenFn(intervalIdx, idIdx),
   218  			Annotation:      annotation,
   219  			ClientTimeNanos: timestamp,
   220  		}
   221  	case metric.GaugeType:
   222  		mu.untimed = unaggregated.MetricUnion{
   223  			Type:            metricType,
   224  			ID:              metricid.RawID(id),
   225  			GaugeVal:        valueGenOpts.gaugeValueGenFn(intervalIdx, idIdx),
   226  			Annotation:      annotation,
   227  			ClientTimeNanos: timestamp,
   228  		}
   229  	default:
   230  		return metricUnion{}, fmt.Errorf("unrecognized untimed metric type: %v", metricType)
   231  	}
   232  	return mu, nil
   233  }
   234  
   235  func generateTestTimedMetric(
   236  	metricType metric.Type,
   237  	id string,
   238  	timeNanos int64,
   239  	intervalIdx, idIdx int,
   240  	valueGenOpts timedValueGenOpts,
   241  ) metricUnion {
   242  	return metricUnion{
   243  		category: timedMetric,
   244  		timed: aggregated.Metric{
   245  			Type:       metricType,
   246  			ID:         metricid.RawID(id),
   247  			TimeNanos:  timeNanos,
   248  			Value:      valueGenOpts.timedValueGenFn(intervalIdx, idIdx),
   249  			Annotation: generateAnnotation(metricType, idIdx),
   250  		},
   251  	}
   252  }
   253  
   254  func generateTestPassthroughMetric(
   255  	metricType metric.Type,
   256  	id string,
   257  	timeNanos int64,
   258  	intervalIdx, idIdx int,
   259  	valueGenOpts passthroughValueGenOpts,
   260  ) metricUnion {
   261  	return metricUnion{
   262  		category: passthroughMetric,
   263  		passthrough: aggregated.Metric{
   264  			Type:       metricType,
   265  			ID:         metricid.RawID(id),
   266  			TimeNanos:  timeNanos,
   267  			Value:      valueGenOpts.passthroughValueGenFn(intervalIdx, idIdx),
   268  			Annotation: generateAnnotation(metricType, idIdx),
   269  		},
   270  	}
   271  }
   272  
   273  func generateTestForwardedMetric(
   274  	metricType metric.Type,
   275  	id string,
   276  	timeNanos int64,
   277  	intervalIdx, idIdx int,
   278  	valueGenOpts forwardedValueGenOpts,
   279  ) metricUnion {
   280  	return metricUnion{
   281  		category: forwardedMetric,
   282  		forwarded: aggregated.ForwardedMetric{
   283  			Type:       metricType,
   284  			ID:         metricid.RawID(id),
   285  			TimeNanos:  timeNanos,
   286  			Values:     valueGenOpts.forwardedValueGenFn(intervalIdx, idIdx),
   287  			Annotation: generateAnnotation(metricType, idIdx),
   288  		},
   289  	}
   290  }
   291  
   292  func generateAnnotation(typ metric.Type, idx int) []byte {
   293  	return []byte(fmt.Sprintf("%v annotation, idx=%v", typ.String(), idx))
   294  }
   295  
   296  func mustComputeExpectedResults(
   297  	t *testing.T,
   298  	now time.Time,
   299  	dataset testDataset,
   300  	opts aggregator.Options,
   301  ) []aggregated.MetricWithStoragePolicy {
   302  	res, err := computeExpectedResults(now, dataset, opts)
   303  	require.NoError(t, err)
   304  	return res
   305  }
   306  
   307  func computeExpectedResults(
   308  	now time.Time,
   309  	dataset testDataset,
   310  	opts aggregator.Options,
   311  ) ([]aggregated.MetricWithStoragePolicy, error) {
   312  	buckets, err := computeExpectedAggregationBuckets(now, dataset, opts)
   313  	if err != nil {
   314  		return nil, err
   315  	}
   316  	return computeExpectedAggregationOutput(now, buckets, opts)
   317  }
   318  
   319  // computeExpectedAggregationBuckets computes the expected aggregation buckets for the given
   320  // dataset and the aggregation keys, assuming each metric in the given dataset is associated
   321  // with the full set of aggregation keys passed in.
   322  func computeExpectedAggregationBuckets(
   323  	now time.Time,
   324  	dataset testDataset,
   325  	opts aggregator.Options,
   326  ) ([]aggregationBucket, error) {
   327  	var (
   328  		buckets                = make([]aggregationBucket, 0)
   329  		defaultStoragePolicies = opts.DefaultStoragePolicies()
   330  	)
   331  	for _, dataValues := range dataset {
   332  		for _, mm := range dataValues.metricWithMetadatas {
   333  			keys, err := mm.metadata.expectedAggregationKeys(now, defaultStoragePolicies)
   334  			if err != nil {
   335  				return nil, err
   336  			}
   337  			for _, key := range keys {
   338  				// Find or create the corresponding bucket.
   339  				var bucket *aggregationBucket
   340  				for _, b := range buckets {
   341  					if b.key.Equal(key) {
   342  						bucket = &b
   343  						break
   344  					}
   345  				}
   346  				if bucket == nil {
   347  					buckets = append(buckets, aggregationBucket{key: key, data: make(datapointsByID)})
   348  					bucket = &buckets[len(buckets)-1]
   349  				}
   350  
   351  				// Add metric to the list of metrics aggregated by the aggregation bucket if necessary.
   352  				mu := mm.metric
   353  				key := metricKey{category: mu.category, typ: mu.Type(), id: string(mu.ID()), storagePolicy: key.storagePolicy}
   354  				datapoints, metricExists := bucket.data[key]
   355  				if !metricExists {
   356  					datapoints = make(valuesByTime)
   357  					bucket.data[key] = datapoints
   358  				}
   359  
   360  				// Add metric to the time bucket associated with the aggregation bucket if necessary.
   361  				resolution := bucket.key.storagePolicy.Resolution()
   362  				alignedStartNanos := dataValues.timestamp.Truncate(resolution.Window).UnixNano()
   363  				values, timeBucketExists := datapoints[alignedStartNanos]
   364  				if !timeBucketExists {
   365  					var (
   366  						aggTypeOpts     = opts.AggregationTypesOptions()
   367  						aggTypes        = maggregation.NewIDDecompressor().MustDecompress(bucket.key.aggregationID)
   368  						aggregationOpts = aggregation.NewOptions(opts.InstrumentOptions())
   369  					)
   370  					switch mu.Type() {
   371  					case metric.CounterType:
   372  						if aggTypes.IsDefault() {
   373  							aggTypes = aggTypeOpts.DefaultCounterAggregationTypes()
   374  						}
   375  						aggregationOpts.ResetSetData(aggTypes)
   376  						values = aggregation.NewCounter(aggregationOpts)
   377  					case metric.TimerType:
   378  						if aggTypes.IsDefault() {
   379  							aggTypes = aggTypeOpts.DefaultTimerAggregationTypes()
   380  						}
   381  						aggregationOpts.ResetSetData(aggTypes)
   382  						values = aggregation.NewTimer(aggTypeOpts.Quantiles(), opts.StreamOptions(), aggregationOpts)
   383  					case metric.GaugeType:
   384  						if aggTypes.IsDefault() {
   385  							aggTypes = aggTypeOpts.DefaultGaugeAggregationTypes()
   386  						}
   387  						aggregationOpts.ResetSetData(aggTypes)
   388  						values = aggregation.NewGauge(aggregationOpts)
   389  					default:
   390  						return nil, fmt.Errorf("unrecognized metric type %v", mu.Type())
   391  					}
   392  				}
   393  
   394  				// Add metric value to the corresponding time bucket.
   395  				var err error
   396  				switch mu.category {
   397  				case untimedMetric:
   398  					values, err = addUntimedMetricToAggregation(values, mu.untimed)
   399  				case forwardedMetric:
   400  					values, err = addForwardedMetricToAggregation(values, mu.forwarded)
   401  				case timedMetric:
   402  					values, err = addTimedMetricToAggregation(values, mu.timed)
   403  				case passthroughMetric:
   404  					// Passthrough metrics need no aggregation.
   405  					err = nil
   406  				default:
   407  					err = fmt.Errorf("unrecognized metric category: %v", mu.category)
   408  				}
   409  				if err != nil {
   410  					return nil, err
   411  				}
   412  				datapoints[alignedStartNanos] = values
   413  			}
   414  		}
   415  	}
   416  
   417  	return buckets, nil
   418  }
   419  
   420  func addUntimedMetricToAggregation(
   421  	values interface{},
   422  	mu unaggregated.MetricUnion,
   423  ) (interface{}, error) {
   424  	switch mu.Type {
   425  	case metric.CounterType:
   426  		v := values.(aggregation.Counter)
   427  		v.Update(time.Now(), mu.CounterVal, mu.Annotation)
   428  		return v, nil
   429  	case metric.TimerType:
   430  		v := values.(aggregation.Timer)
   431  		v.AddBatch(time.Now(), mu.BatchTimerVal, mu.Annotation)
   432  		return v, nil
   433  	case metric.GaugeType:
   434  		v := values.(aggregation.Gauge)
   435  		v.Update(time.Now(), mu.GaugeVal, mu.Annotation)
   436  		return v, nil
   437  	default:
   438  		return nil, fmt.Errorf("unrecognized untimed metric type %v", mu.Type)
   439  	}
   440  }
   441  
   442  func addTimedMetricToAggregation(
   443  	values interface{},
   444  	mu aggregated.Metric,
   445  ) (interface{}, error) {
   446  	switch mu.Type {
   447  	case metric.CounterType:
   448  		v := values.(aggregation.Counter)
   449  		v.Update(time.Now(), int64(mu.Value), mu.Annotation)
   450  		return v, nil
   451  	case metric.TimerType:
   452  		v := values.(aggregation.Timer)
   453  		v.AddBatch(time.Now(), []float64{mu.Value}, mu.Annotation)
   454  		return v, nil
   455  	case metric.GaugeType:
   456  		v := values.(aggregation.Gauge)
   457  		v.Update(time.Now(), mu.Value, mu.Annotation)
   458  		return v, nil
   459  	default:
   460  		return nil, fmt.Errorf("unrecognized timed metric type %v", mu.Type)
   461  	}
   462  }
   463  
   464  func addForwardedMetricToAggregation(
   465  	values interface{},
   466  	mu aggregated.ForwardedMetric,
   467  ) (interface{}, error) {
   468  	switch mu.Type {
   469  	case metric.CounterType:
   470  		v := values.(aggregation.Counter)
   471  		for _, val := range mu.Values {
   472  			v.Update(time.Now(), int64(val), mu.Annotation)
   473  		}
   474  		return v, nil
   475  	case metric.TimerType:
   476  		v := values.(aggregation.Timer)
   477  		v.AddBatch(time.Now(), mu.Values, mu.Annotation)
   478  		return v, nil
   479  	case metric.GaugeType:
   480  		v := values.(aggregation.Gauge)
   481  		for _, val := range mu.Values {
   482  			v.Update(time.Now(), val, mu.Annotation)
   483  		}
   484  		return v, nil
   485  	default:
   486  		return nil, fmt.Errorf("unrecognized forwarded metric type %v", mu.Type)
   487  	}
   488  }
   489  
   490  // computeExpectedAggregationOutput computes the expected aggregation output given
   491  // the current time and the populated aggregation buckets.
   492  func computeExpectedAggregationOutput(
   493  	now time.Time,
   494  	buckets []aggregationBucket,
   495  	opts aggregator.Options,
   496  ) ([]aggregated.MetricWithStoragePolicy, error) {
   497  	var expected []aggregated.MetricWithStoragePolicy
   498  	for _, bucket := range buckets {
   499  		var (
   500  			aggregationTypes   = maggregation.NewIDDecompressor().MustDecompress(bucket.key.aggregationID)
   501  			storagePolicy      = bucket.key.storagePolicy
   502  			resolutionWindow   = storagePolicy.Resolution().Window
   503  			alignedCutoffNanos = now.Truncate(resolutionWindow).UnixNano()
   504  		)
   505  		for key, datapoints := range bucket.data {
   506  			timestampNanosFn := key.category.TimestampNanosFn()
   507  			for windowStartAtNanos, values := range datapoints {
   508  				timestampNanos := timestampNanosFn(windowStartAtNanos, resolutionWindow)
   509  				// The end time must be no later than the aligned cutoff time
   510  				// for the data to be flushed.
   511  				if timestampNanos > alignedCutoffNanos {
   512  					continue
   513  				}
   514  				outputs, err := computeExpectedAggregatedMetrics(
   515  					key,
   516  					timestampNanos,
   517  					values,
   518  					storagePolicy,
   519  					aggregationTypes,
   520  					opts,
   521  				)
   522  				if err != nil {
   523  					return nil, err
   524  				}
   525  				expected = append(expected, outputs...)
   526  			}
   527  		}
   528  	}
   529  
   530  	// Sort the aggregated metrics.
   531  	sort.Sort(byTimeIDPolicyAscending(expected))
   532  
   533  	return expected, nil
   534  }
   535  
   536  // computeExpectedAggregatedMetrics computes the expected set of aggregated metrics
   537  // given the metric key, timestamp, metric aggregation, and related aggregation metadata.
   538  func computeExpectedAggregatedMetrics(
   539  	key metricKey,
   540  	timeNanos int64,
   541  	metricAgg interface{},
   542  	sp policy.StoragePolicy,
   543  	aggTypes maggregation.Types,
   544  	opts aggregator.Options,
   545  ) ([]aggregated.MetricWithStoragePolicy, error) {
   546  	var results []aggregated.MetricWithStoragePolicy
   547  	fn := func(
   548  		prefix []byte,
   549  		id string,
   550  		suffix []byte,
   551  		timeNanos int64,
   552  		value float64,
   553  		annotation []byte,
   554  		sp policy.StoragePolicy,
   555  	) {
   556  		results = append(results, aggregated.MetricWithStoragePolicy{
   557  			Metric: aggregated.Metric{
   558  				ID:         metricid.RawID(string(prefix) + id + string(suffix)),
   559  				TimeNanos:  timeNanos,
   560  				Value:      value,
   561  				Annotation: annotation,
   562  			},
   563  			StoragePolicy: sp,
   564  		})
   565  	}
   566  
   567  	id := key.id
   568  	aggTypeOpts := opts.AggregationTypesOptions()
   569  	switch metricAgg := metricAgg.(type) {
   570  	case aggregation.Counter:
   571  		if aggTypes.IsDefault() {
   572  			aggTypes = aggTypeOpts.DefaultCounterAggregationTypes()
   573  		}
   574  
   575  		for _, aggType := range aggTypes {
   576  			if key.category == timedMetric {
   577  				fn(nil, id, nil, timeNanos, metricAgg.ValueOf(aggType), metricAgg.Annotation(), sp)
   578  				continue
   579  			}
   580  			fn(opts.FullCounterPrefix(), id, aggTypeOpts.TypeStringForCounter(aggType), timeNanos,
   581  				metricAgg.ValueOf(aggType), metricAgg.Annotation(), sp)
   582  		}
   583  	case aggregation.Timer:
   584  		if aggTypes.IsDefault() {
   585  			aggTypes = aggTypeOpts.DefaultTimerAggregationTypes()
   586  		}
   587  
   588  		for _, aggType := range aggTypes {
   589  			if key.category == timedMetric {
   590  				fn(nil, id, nil, timeNanos, metricAgg.ValueOf(aggType), metricAgg.Annotation(), sp)
   591  				continue
   592  			}
   593  			fn(opts.FullTimerPrefix(), id, aggTypeOpts.TypeStringForTimer(aggType), timeNanos,
   594  				metricAgg.ValueOf(aggType), metricAgg.Annotation(), sp)
   595  		}
   596  	case aggregation.Gauge:
   597  		if aggTypes.IsDefault() {
   598  			aggTypes = aggTypeOpts.DefaultGaugeAggregationTypes()
   599  		}
   600  
   601  		for _, aggType := range aggTypes {
   602  			if key.category == timedMetric {
   603  				fn(nil, id, nil, timeNanos, metricAgg.ValueOf(aggType), metricAgg.Annotation(), sp)
   604  				continue
   605  			}
   606  			fn(opts.FullGaugePrefix(), id, aggTypeOpts.TypeStringForGauge(aggType), timeNanos,
   607  				metricAgg.ValueOf(aggType), metricAgg.Annotation(), sp)
   608  		}
   609  	default:
   610  		return nil, fmt.Errorf("unrecognized aggregation type %T", metricAgg)
   611  	}
   612  
   613  	return results, nil
   614  }
   615  
   616  func roundRobinMetricTypeFn(_ time.Time, idx int) metric.Type {
   617  	switch idx % 3 {
   618  	case 0:
   619  		return metric.CounterType
   620  	case 1:
   621  		return metric.TimerType
   622  	default:
   623  		return metric.GaugeType
   624  	}
   625  }
   626  
   627  func constantMetricTypeFnFactory(typ metric.Type) metricTypeFn {
   628  	return func(time.Time, int) metric.Type { return typ }
   629  }
   630  
   631  type byTimeIDPolicyAscending []aggregated.MetricWithStoragePolicy
   632  
   633  func (a byTimeIDPolicyAscending) Len() int      { return len(a) }
   634  func (a byTimeIDPolicyAscending) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
   635  func (a byTimeIDPolicyAscending) Less(i, j int) bool {
   636  	if a[i].TimeNanos != a[j].TimeNanos {
   637  		return a[i].TimeNanos < a[j].TimeNanos
   638  	}
   639  	id1, id2 := string(a[i].ID), string(a[j].ID)
   640  	if id1 != id2 {
   641  		return id1 < id2
   642  	}
   643  	resolution1, resolution2 := a[i].Resolution().Window, a[j].Resolution().Window
   644  	if resolution1 != resolution2 {
   645  		return resolution1 < resolution2
   646  	}
   647  	retention1, retention2 := a[i].Retention(), a[j].Retention()
   648  	return retention1 < retention2
   649  }
   650  
// metricTypeFn determines the metric type to generate for a given timestamp
// and ID index.
type metricTypeFn func(ts time.Time, idx int) metric.Type

// metricKey uniquely identifies a metric series within an aggregation bucket:
// its ingest category, metric type, raw ID, and the storage policy it is
// aggregated under.
type metricKey struct {
	category      metricCategory
	typ           metric.Type
	id            string
	storagePolicy policy.StoragePolicy
}

type (
	// valuesByTime maps an aligned window-start time (Unix nanos) to the
	// in-progress aggregation (Counter/Timer/Gauge) for that window.
	valuesByTime map[int64]interface{}
	// datapointsByID maps each metric key to its per-window aggregations.
	datapointsByID map[metricKey]valuesByTime
)
   664  
   665  type aggregationKey struct {
   666  	aggregationID maggregation.ID
   667  	storagePolicy policy.StoragePolicy
   668  	pipeline      applied.Pipeline
   669  }
   670  
   671  func (k aggregationKey) Equal(other aggregationKey) bool {
   672  	return k.aggregationID == other.aggregationID &&
   673  		k.storagePolicy == other.storagePolicy &&
   674  		k.pipeline.Equal(other.pipeline)
   675  }
   676  
   677  type aggregationKeys []aggregationKey
   678  
   679  func (keys *aggregationKeys) add(newKey aggregationKey) {
   680  	for _, k := range *keys {
   681  		if k.Equal(newKey) {
   682  			return
   683  		}
   684  	}
   685  	*keys = append(*keys, newKey)
   686  }
   687  
// aggregationBucket holds all series datapoints aggregated under a single
// aggregation key.
type aggregationBucket struct {
	key  aggregationKey
	data datapointsByID
}

// timestampNanosFn computes the timestamp in nanoseconds of metrics in a given time window.
type timestampNanosFn func(windowStartAtNanos int64, resolution time.Duration) int64

// metricCategory identifies how a metric enters the aggregator: untimed,
// forwarded, timed, or passthrough.
type metricCategory int

const (
	untimedMetric metricCategory = iota
	forwardedMetric
	timedMetric
	passthroughMetric
)
   704  
   705  func (c metricCategory) TimestampNanosFn() timestampNanosFn {
   706  	switch c {
   707  	case untimedMetric:
   708  		return func(windowStartAtNanos int64, resolution time.Duration) int64 {
   709  			return windowStartAtNanos + resolution.Nanoseconds()
   710  		}
   711  	case forwardedMetric:
   712  		return func(windowStartAtNanos int64, _ time.Duration) int64 {
   713  			return windowStartAtNanos
   714  		}
   715  	case timedMetric:
   716  		return func(windowStartAtNanos int64, resolution time.Duration) int64 {
   717  			return windowStartAtNanos + resolution.Nanoseconds()
   718  		}
   719  	case passthroughMetric:
   720  		return func(windowStartAtNanos int64, _ time.Duration) int64 {
   721  			return windowStartAtNanos
   722  		}
   723  	default:
   724  		panic(fmt.Errorf("unknown category type: %v", c))
   725  	}
   726  }
   727  
// metricUnion is a tagged union over the supported metric representations;
// category selects which of the remaining fields is populated.
type metricUnion struct {
	category    metricCategory
	untimed     unaggregated.MetricUnion
	forwarded   aggregated.ForwardedMetric
	timed       aggregated.Metric
	passthrough aggregated.Metric
}
   735  
// Type returns the metric type of whichever representation the union holds,
// as selected by category. It panics on an unknown category, which indicates
// a programming error in the test setup.
func (mu metricUnion) Type() metric.Type {
	switch mu.category {
	case untimedMetric:
		return mu.untimed.Type
	case forwardedMetric:
		return mu.forwarded.Type
	case timedMetric:
		return mu.timed.Type
	case passthroughMetric:
		return mu.passthrough.Type
	default:
		panic(fmt.Errorf("unknown category type: %v", mu.category))
	}
}
   750  
// ID returns the raw metric ID of whichever representation the union holds,
// as selected by category. It panics on an unknown category, which indicates
// a programming error in the test setup.
func (mu metricUnion) ID() metricid.RawID {
	switch mu.category {
	case untimedMetric:
		return mu.untimed.ID
	case forwardedMetric:
		return mu.forwarded.ID
	case timedMetric:
		return mu.timed.ID
	case passthroughMetric:
		return mu.passthrough.ID
	default:
		panic(fmt.Errorf("unknown category type: %v", mu.category))
	}
}
   765  
// metadataType identifies which metadata representation a metadataUnion holds.
type metadataType int

const (
	stagedMetadatasType metadataType = iota
	forwardMetadataType
	timedMetadataType
	passthroughMetadataType
)

// metadataFn returns the metadata to associate with the metric at the given
// ID index.
type metadataFn func(idx int) metadataUnion

// metadataUnion is a tagged union over the supported metadata representations;
// mType selects which of the remaining fields is populated.
type metadataUnion struct {
	mType               metadataType
	stagedMetadatas     metadata.StagedMetadatas
	forwardMetadata     metadata.ForwardMetadata
	timedMetadata       metadata.TimedMetadata
	passthroughMetadata policy.StoragePolicy
}
   784  
   785  func (mu metadataUnion) expectedAggregationKeys(
   786  	now time.Time,
   787  	defaultStoragePolicies []policy.StoragePolicy,
   788  ) (aggregationKeys, error) {
   789  	switch mu.mType {
   790  	case stagedMetadatasType:
   791  		return computeExpectedAggregationKeysFromStagedMetadatas(now, mu.stagedMetadatas, defaultStoragePolicies)
   792  	case forwardMetadataType:
   793  		return computeExpectedAggregationKeysFromForwardMetadata(mu.forwardMetadata), nil
   794  	case timedMetadataType:
   795  		return computeExpectedAggregationKeysFromTimedMetadata(mu.timedMetadata), nil
   796  	case passthroughMetadataType:
   797  		return computeExpectedAggregationKeysFromPassthroughMetadata(mu.passthroughMetadata), nil
   798  	default:
   799  		return nil, fmt.Errorf("unexpected metadata type: %v", mu.mType)
   800  	}
   801  }
   802  
   803  func computeExpectedAggregationKeysFromStagedMetadatas(
   804  	now time.Time,
   805  	metadatas metadata.StagedMetadatas,
   806  	defaultStoragePolices []policy.StoragePolicy,
   807  ) (aggregationKeys, error) {
   808  	// Find the staged policy that is currently active.
   809  	nowNanos := now.UnixNano()
   810  	i := len(metadatas) - 1
   811  	for i >= 0 {
   812  		if metadatas[i].CutoverNanos <= nowNanos {
   813  			break
   814  		}
   815  		i--
   816  	}
   817  	if i < 0 {
   818  		return nil, errors.New("no active staged metadata")
   819  	}
   820  
   821  	res := make(aggregationKeys, 0, len(metadatas[i].Pipelines))
   822  	for _, pipeline := range metadatas[i].Pipelines {
   823  		storagePolicies := pipeline.StoragePolicies
   824  		if storagePolicies.IsDefault() {
   825  			storagePolicies = defaultStoragePolices
   826  		}
   827  		for _, sp := range storagePolicies {
   828  			newKey := aggregationKey{
   829  				aggregationID: pipeline.AggregationID,
   830  				storagePolicy: sp,
   831  				pipeline:      pipeline.Pipeline,
   832  			}
   833  			res.add(newKey)
   834  		}
   835  	}
   836  	return res, nil
   837  }
   838  
   839  func computeExpectedAggregationKeysFromTimedMetadata(
   840  	metadata metadata.TimedMetadata,
   841  ) aggregationKeys {
   842  	return aggregationKeys{
   843  		{
   844  			aggregationID: metadata.AggregationID,
   845  			storagePolicy: metadata.StoragePolicy,
   846  		},
   847  	}
   848  }
   849  
   850  func computeExpectedAggregationKeysFromPassthroughMetadata(
   851  	metadata policy.StoragePolicy,
   852  ) aggregationKeys {
   853  	return aggregationKeys{
   854  		{
   855  			aggregationID: maggregation.DefaultID,
   856  			storagePolicy: metadata,
   857  		},
   858  	}
   859  }
   860  
   861  func computeExpectedAggregationKeysFromForwardMetadata(
   862  	metadata metadata.ForwardMetadata,
   863  ) aggregationKeys {
   864  	return aggregationKeys{
   865  		{
   866  			aggregationID: metadata.AggregationID,
   867  			storagePolicy: metadata.StoragePolicy,
   868  			pipeline:      metadata.Pipeline,
   869  		},
   870  	}
   871  }
   872  
// metricWithMetadataUnion pairs a metric union with the metadata union that
// describes how it should be aggregated.
type metricWithMetadataUnion struct {
	metric   metricUnion
	metadata metadataUnion
}

// testData holds the metrics (with metadata) generated for a single
// timestamp of a test dataset.
type testData struct {
	timestamp           time.Time
	metricWithMetadatas []metricWithMetadataUnion
}

// testDataset is an ordered collection of per-timestamp test data.
type testDataset []testData
   884  
// Value generator functions produce deterministic metric values from the
// interval index and the metric ID index, so tests can compute the expected
// aggregation results.
type (
	// counterValueGenFn produces a counter value for an interval/ID pair.
	counterValueGenFn func(intervalIdx, idIdx int) int64
	// timerValueGenFn produces batch timer values for an interval/ID pair.
	timerValueGenFn   func(intervalIdx, idIdx int) []float64
	// gaugeValueGenFn produces a gauge value for an interval/ID pair.
	gaugeValueGenFn   func(intervalIdx, idIdx int) float64
)
   890  
   891  func defaultCounterValueGenFn(intervalIdx, _ int) int64 {
   892  	testCounterVal := int64(123)
   893  	return testCounterVal + int64(intervalIdx)
   894  }
   895  
   896  func defaultTimerValueGenFn(intervalIdx, _ int) []float64 {
   897  	testBatchTimerVals := []float64{1.5, 2.5, 3.5, 4.5, 5.5}
   898  	vals := make([]float64, len(testBatchTimerVals))
   899  	for idx, v := range testBatchTimerVals {
   900  		vals[idx] = v + float64(intervalIdx)
   901  	}
   902  	return vals
   903  }
   904  
   905  func defaultGaugeValueGenFn(intervalIdx, _ int) float64 {
   906  	testGaugeVal := 456.789
   907  	return testGaugeVal + float64(intervalIdx)
   908  }
   909  
// untimedValueGenOpts bundles the value generator functions for untimed
// metrics (counters, batch timers, and gauges).
type untimedValueGenOpts struct {
	counterValueGenFn counterValueGenFn
	timerValueGenFn   timerValueGenFn
	gaugeValueGenFn   gaugeValueGenFn
}

// defaultUntimedValueGenOpts uses the default generator for each untimed
// metric type.
var defaultUntimedValueGenOpts = untimedValueGenOpts{
	counterValueGenFn: defaultCounterValueGenFn,
	timerValueGenFn:   defaultTimerValueGenFn,
	gaugeValueGenFn:   defaultGaugeValueGenFn,
}
   921  
   922  type timedValueGenFn func(intervalIdx, idIdx int) float64
   923  
   924  func defaultTimedValueGenFn(intervalIdx, _ int) float64 {
   925  	testVal := 456.789
   926  	return testVal + float64(intervalIdx)
   927  }
   928  
// timedValueGenOpts bundles the value generator function for timed metrics.
type timedValueGenOpts struct {
	timedValueGenFn timedValueGenFn
}

// defaultTimedValueGenOpts uses the default timed value generator.
var defaultTimedValueGenOpts = timedValueGenOpts{
	timedValueGenFn: defaultTimedValueGenFn,
}
   936  
   937  type passthroughValueGenFn func(intervalIdx, idIdx int) float64
   938  
   939  func defaultPassthroughValueGenFn(intervalIdx, _ int) float64 {
   940  	testVal := 123.456
   941  	return testVal + float64(intervalIdx)
   942  }
   943  
// passthroughValueGenOpts bundles the value generator function for
// passthrough metrics.
type passthroughValueGenOpts struct {
	passthroughValueGenFn passthroughValueGenFn
}

// defaultPassthroughValueGenOpts uses the default passthrough value generator.
var defaultPassthroughValueGenOpts = passthroughValueGenOpts{
	passthroughValueGenFn: defaultPassthroughValueGenFn,
}
   951  
   952  type forwardedValueGenFn func(intervalIdx, idIdx int) []float64
   953  
   954  func defaultForwardedValueGenFn(intervalIdx, _ int) []float64 {
   955  	testForwardedVals := []float64{1.2, 3.4, 5.6}
   956  	vals := make([]float64, len(testForwardedVals))
   957  	for idx, v := range testForwardedVals {
   958  		vals[idx] = v + float64(intervalIdx)
   959  	}
   960  	return vals
   961  }
   962  
// forwardedValueGenOpts bundles the value generator function for forwarded
// metrics.
type forwardedValueGenOpts struct {
	forwardedValueGenFn forwardedValueGenFn
}

// defaultForwardedValueGenOpts uses the default forwarded value generator.
var defaultForwardedValueGenOpts = forwardedValueGenOpts{
	forwardedValueGenFn: defaultForwardedValueGenFn,
}
   970  
// valueGenOpts aggregates the value generator options for every metric
// category used in the integration tests.
type valueGenOpts struct {
	untimed     untimedValueGenOpts
	timed       timedValueGenOpts
	forwarded   forwardedValueGenOpts
	passthrough passthroughValueGenOpts
}

// defaultValueGenOpts uses the default generators for all metric categories.
var defaultValueGenOpts = valueGenOpts{
	untimed:     defaultUntimedValueGenOpts,
	timed:       defaultTimedValueGenOpts,
	forwarded:   defaultForwardedValueGenOpts,
	passthrough: defaultPassthroughValueGenOpts,
}
   984  
// datasetGenOpts controls how a test dataset is generated: the time range
// and interval to cover, the metric IDs to emit, the metric category and
// type, how metric values are generated, and how metadata is attached.
type datasetGenOpts struct {
	start        time.Time
	stop         time.Time
	interval     time.Duration
	ids          []string
	category     metricCategory
	typeFn       metricTypeFn
	valueGenOpts valueGenOpts
	metadataFn   metadataFn
}