github.com/m3db/m3@v1.5.0/src/query/graphite/ts/series.go

     1  // Copyright (c) 2019 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package ts
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"math"
    27  	"regexp"
    28  	"sort"
    29  	"strconv"
    30  	"strings"
    31  	"time"
    32  
    33  	"github.com/m3db/m3/src/query/block"
    34  	"github.com/m3db/m3/src/query/graphite/context"
    35  	"github.com/m3db/m3/src/query/graphite/stats"
    36  )
    37  
    38  var (
    39  	// ErrRangeIsInvalid is returned when attempting to slice Series with invalid range
    40  	// endpoints (begin is at or beyond end).
    41  	ErrRangeIsInvalid = errors.New("requested range is invalid")
    42  
    43  	digitsRegex = regexp.MustCompile(`\d+`)
    44  )
    45  
    46  const (
    47  	digits = "0123456789"
    48  )
    49  
    50  // An AggregationFunc combines two data values at a given point.
    51  type AggregationFunc func(a, b float64) float64
    52  
    53  // A Series is the public interface to a block of timeseries values.  Each block has a start time,
    54  // a logical number of steps, and a step size indicating the number of milliseconds represented by each point.
    55  type Series struct {
    56  	name      string
    57  	startTime time.Time
    58  	vals      Values
    59  	ctx       context.Context
    60  
    61  	// The Specification is the path that was used to generate this timeseries,
    62  	// typically either the query, or the function stack used to transform
    63  	// specific results.
    64  	Specification string
    65  
    66  	// consolidationFunc specifies how the series will be consolidated when the
    67  	// number of data points in the series is more than the maximum number allowed.
    68  	consolidationFunc ConsolidationFunc
    69  }
    70  
    71  // SeriesByName implements sort.Interface for sorting collections
    72  // of series by name.
    73  type SeriesByName []*Series
    74  
    75  // Len returns the length of the series collection
    76  func (a SeriesByName) Len() int {
    77  	return len(a)
    78  }
    79  
    80  // Swap swaps two series in the collection
    81  func (a SeriesByName) Swap(i, j int) {
    82  	a[i], a[j] = a[j], a[i]
    83  }
    84  
    85  // Less determines if a series is ordered before another series by name
    86  func (a SeriesByName) Less(i, j int) bool {
    87  	return a[i].name < a[j].name
    88  }
    89  
    90  // SeriesByNameAndNaturalNumbers implements sort.Interface for sorting
    91  // collections of series by name respecting natural sort order for numbers.
    92  type SeriesByNameAndNaturalNumbers []*Series
    93  
    94  // Len returns the length of the series collection
    95  func (a SeriesByNameAndNaturalNumbers) Len() int {
    96  	return len(a)
    97  }
    98  
    99  // Swap swaps two series in the collection
   100  func (a SeriesByNameAndNaturalNumbers) Swap(i, j int) {
   101  	a[i], a[j] = a[j], a[i]
   102  }
   103  
   104  // Less determines if a series is ordered before another series by name, respecting natural number order
   105  // nolint: ifshort
   106  func (a SeriesByNameAndNaturalNumbers) Less(i, j int) bool {
   107  	left := a[i].name
   108  	if strings.ContainsAny(left, digits) {
   109  		left = digitsRegex.ReplaceAllStringFunc(left, digitsPrefixed)
   110  	}
   111  
   112  	right := a[j].name
   113  	if strings.ContainsAny(right, digits) {
   114  		right = digitsRegex.ReplaceAllStringFunc(right, digitsPrefixed)
   115  	}
   116  
   117  	return left < right
   118  }
   119  
   120  func digitsPrefixed(digits string) string {
   121  	n, err := strconv.Atoi(digits)
   122  	if err != nil {
   123  		return digits
   124  	}
   125  	return fmt.Sprintf("%010d", n)
   126  }
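
// As an illustrative sketch (the names "server2" and "server10" are
// hypothetical), plain SeriesByName orders "server10" before "server2"
// because '1' < '2' lexicographically, whereas SeriesByNameAndNaturalNumbers
// compares the zero-padded forms produced by digitsPrefixed
// ("server0000000002" vs "server0000000010") and so orders "server2" first:
//
//	sort.Sort(SeriesByName(series))                  // server10, server2
//	sort.Sort(SeriesByNameAndNaturalNumbers(series)) // server2, server10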
   127  
   128  // NewSeries creates a new Series at a given start time, backed by the provided values
   129  func NewSeries(ctx context.Context, name string, startTime time.Time, vals Values) *Series {
   130  	return &Series{
   131  		name:          name,
   132  		startTime:     startTime,
   133  		vals:          vals,
   134  		ctx:           ctx,
   135  		Specification: name,
   136  	}
   137  }
   138  
   139  // DerivedSeries returns a series derived from the current series with different datapoints
   140  func (b *Series) DerivedSeries(startTime time.Time, vals Values) *Series {
   141  	series := NewSeries(b.ctx, b.name, startTime, vals)
   142  	series.Specification = b.Specification
   143  	series.consolidationFunc = b.consolidationFunc
   144  	return series
   145  }
   146  
   147  // Name returns the name of the timeseries block
   148  func (b *Series) Name() string { return b.name }
   149  
   150  // RenamedTo returns a new timeseries with the same values but a different name
   151  func (b *Series) RenamedTo(name string) *Series {
   152  	return &Series{
   153  		name:              name,
   154  		startTime:         b.startTime,
   155  		vals:              b.vals,
   156  		ctx:               b.ctx,
   157  		Specification:     b.Specification,
   158  		consolidationFunc: b.consolidationFunc,
   159  	}
   160  }
   161  
   162  // Shift returns a new timeseries with the same values but a different startTime
   163  func (b *Series) Shift(shift time.Duration) *Series {
   164  	return &Series{
   165  		name:              b.name,
   166  		startTime:         b.startTime.Add(shift),
   167  		vals:              b.vals,
   168  		ctx:               b.ctx,
   169  		Specification:     b.Specification,
   170  		consolidationFunc: b.consolidationFunc,
   171  	}
   172  }
   173  
   174  // StartTime returns the time the block starts
   175  func (b *Series) StartTime() time.Time { return b.startTime }
   176  
   177  // EndTime returns the time the block ends
   178  func (b *Series) EndTime() time.Time { return b.startTime.Add(b.Duration()) }
   179  
   180  // Duration returns the Duration covered by the block
   181  func (b *Series) Duration() time.Duration {
   182  	return time.Millisecond * time.Duration(b.vals.Len()*b.vals.MillisPerStep())
   183  }
   184  
   185  // MillisPerStep returns the number of milliseconds per step
   186  func (b *Series) MillisPerStep() int { return b.vals.MillisPerStep() }
   187  
   188  // Resolution returns the duration of a single step
   189  func (b *Series) Resolution() time.Duration {
   190  	return time.Duration(b.MillisPerStep()) * time.Millisecond
   191  }
   192  
   193  // StepAtTime returns the step within the block containing the given time; times before the block start clamp to step 0
   194  func (b *Series) StepAtTime(t time.Time) int {
   195  	step := int(t.UnixNano()/1000000-b.startTime.UnixNano()/1000000) / b.vals.MillisPerStep()
   196  	if step < 0 {
   197  		return 0
   198  	}
   199  
   200  	return step
   201  }
   202  
   203  // StartTimeForStep returns the time at which the given step starts
   204  func (b *Series) StartTimeForStep(n int) time.Time {
   205  	return b.StartTime().Add(time.Millisecond * time.Duration(n*b.vals.MillisPerStep()))
   206  }
   207  
   208  // EndTimeForStep returns the time at which the given step ends
   209  func (b *Series) EndTimeForStep(n int) time.Time {
   210  	return b.StartTimeForStep(n).Add(time.Millisecond * time.Duration(b.vals.MillisPerStep()))
   211  }
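
// A worked sketch of the step arithmetic above, assuming a hypothetical
// series with a 10s step (MillisPerStep() == 10000): a time 25s after
// startTime falls into step 25000/10000 = 2 (integer division),
// StartTimeForStep(2) is startTime+20s, and EndTimeForStep(2) is
// startTime+30s. Times before startTime are clamped to step 0 by StepAtTime.
//
//	step := b.StepAtTime(b.StartTime().Add(25 * time.Second)) // == 2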
   212  
   213  // Slice returns a new Series composed from a subset of values in the original Series
   214  func (b *Series) Slice(begin, end int) (*Series, error) {
   215  	if begin >= end {
   216  		return nil, ErrRangeIsInvalid
   217  	}
   218  
   219  	result := NewSeries(b.ctx, b.name, b.StartTimeForStep(begin), b.vals.Slice(begin, end))
   220  	result.consolidationFunc = b.consolidationFunc
   221  
   222  	return result, nil
   223  }
   224  
   225  // ValueAtTime returns the value stored at the step representing the given time
   226  func (b *Series) ValueAtTime(t time.Time) float64 {
   227  	return b.ValueAt(b.StepAtTime(t))
   228  }
   229  
   230  // AllNaN returns true if the timeseries is all NaNs
   231  func (b *Series) AllNaN() bool { return b.vals.AllNaN() }
   232  
   233  // CalcStatistics calculates a standard aggregation across the block values
   234  func (b *Series) CalcStatistics() stats.Statistics {
   235  	if agg, ok := b.vals.(CustomStatistics); ok {
   236  		return agg.CalcStatistics()
   237  	}
   238  
   239  	return stats.Calc(b)
   240  }
   241  
   242  // Contains checks whether the given series contains the provided time
   243  func (b *Series) Contains(t time.Time) bool {
   244  	step := b.StepAtTime(t)
   245  	return step >= 0 && step < b.Len()
   246  }
   247  
   248  // Len returns the number of values in the time series.  Used for aggregation
   249  func (b *Series) Len() int { return b.vals.Len() }
   250  
   251  // ValueAt returns the value at a given step.  Used for aggregation
   252  func (b *Series) ValueAt(i int) float64 { return b.vals.ValueAt(i) }
   253  
   254  // SafeMax returns the maximum non-NaN value of a series.
   255  func (b *Series) SafeMax() float64 { return b.CalcStatistics().Max }
   256  
   257  // SafeMin returns the minimum non-NaN value of a series.
   258  func (b *Series) SafeMin() float64 { return b.CalcStatistics().Min }
   259  
   260  // SafeSum returns the sum of the values of a series, excluding NaNs.
   261  func (b *Series) SafeSum() float64 { return b.CalcStatistics().Sum }
   262  
   263  // SafeAvg returns the average of the values of a series, excluding NaNs.
   264  func (b *Series) SafeAvg() float64 { return b.CalcStatistics().Mean }
   265  
   266  // SafeStdDev returns the standard deviation of the values of a series, excluding NaNs.
   267  func (b *Series) SafeStdDev() float64 { return b.CalcStatistics().StdDev }
   268  
   269  // SafeLastValue returns the last non-NaN datapoint of a series.
   270  func (b *Series) SafeLastValue() float64 {
   271  	numPoints := b.Len()
   272  	for i := numPoints - 1; i >= 0; i-- {
   273  		v := b.ValueAt(i)
   274  		if !math.IsNaN(v) {
   275  			return v
   276  		}
   277  	}
   278  	return math.NaN()
   279  }
   280  
   281  // SafeValues returns all non-NaN values in the series.
   282  func (b *Series) SafeValues() []float64 {
   283  	numPoints := b.Len()
   284  	vals := make([]float64, 0, numPoints)
   285  	for i := 0; i < numPoints; i++ {
   286  		v := b.ValueAt(i)
   287  		if !math.IsNaN(v) {
   288  			vals = append(vals, v)
   289  		}
   290  	}
   291  	return vals
   292  }
   293  
   294  // ConsolidationFunc returns the consolidation function for the series,
   295  // or the averaging function if none is specified.
   296  func (b *Series) ConsolidationFunc() ConsolidationFunc {
   297  	if b.consolidationFunc != nil {
   298  		return b.consolidationFunc
   299  	}
   300  	return Avg
   301  }
   302  
   303  // IsConsolidationFuncSet returns whether the consolidationFunc is set
   304  func (b *Series) IsConsolidationFuncSet() bool {
   305  	return b.consolidationFunc != nil
   306  }
   307  
   308  // SetConsolidationFunc sets the consolidation function for the series
   309  func (b *Series) SetConsolidationFunc(cf ConsolidationFunc) {
   310  	b.consolidationFunc = cf
   311  }
   312  
   313  // PostConsolidationFunc is a function that takes a tuple of time and value after consolidation.
   314  // PostConsolidationFunc is a callback that receives a timestamp and its value after consolidation.
   315  
   316  // intersection returns a 3-tuple: the first return parameter indicates whether the intersection
   317  // spans at least one nanosecond; the next two are the start and end boundary timestamps of the
   318  // resulting overlap.
   319  func (b *Series) intersection(start, end time.Time) (bool, time.Time, time.Time) {
   320  	if b.EndTime().Before(start) || b.StartTime().After(end) {
   321  		return false, start, end
   322  	}
   323  	if start.Before(b.StartTime()) {
   324  		start = b.StartTime()
   325  	}
   326  	if end.After(b.EndTime()) {
   327  		end = b.EndTime()
   328  	}
   329  	if start.Equal(end) {
   330  		return false, start, end
   331  	}
   332  	return true, start, end
   333  }
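
// For example, assuming a hypothetical series covering 10:00:00-10:05:00,
// intersection(10:03:00, 10:10:00) clamps the requested end to the series
// end and returns (true, 10:03:00, 10:05:00); a request entirely outside the
// series, or one whose clamped range collapses to a single instant, returns
// false.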
   334  
   335  // resizeStep walks the series between start and end at a different step size, aggregating the
   336  // values within each new step; callers must provide a callback that collects each aggregated result
   337  func (b *Series) resizeStep(start, end time.Time, millisPerStep int,
   338  	stepAggregator ConsolidationFunc, callback PostConsolidationFunc) {
   339  	// panic, panic, panic for all malformed callers
   340  	if end.Before(start) || start.Before(b.StartTime()) || end.After(b.EndTime()) {
   341  		panic("invalid boundary params")
   342  	}
   343  	if b.MillisPerStep() == millisPerStep {
   344  		panic("requires different step size")
   345  	}
   346  	if b.MillisPerStep() < millisPerStep {
   347  		// Series step size is smaller than the consolidation step - aggregate each group of
   348  		// series steps, then apply the aggregated value to the consolidation.
   349  		seriesValuesPerStep := millisPerStep / b.MillisPerStep()
   350  		seriesStart, seriesEnd := b.StepAtTime(start), b.StepAtTime(end)
   351  		for n := seriesStart; n < seriesEnd; n += seriesValuesPerStep {
   352  			timestamp := b.StartTimeForStep(n)
   353  			aggregatedValue := math.NaN()
   354  			count := 0
   355  
   356  			for i := 0; i < seriesValuesPerStep && n+i < seriesEnd; i++ {
   357  				value := b.ValueAt(n + i)
   358  				aggregatedValue, count = consolidateValues(aggregatedValue, value, count,
   359  					stepAggregator)
   360  			}
   361  			callback(timestamp, aggregatedValue)
   362  		}
   363  		return
   364  	}
   365  }
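
// As a sketch of the downsampling path above, assume a hypothetical series
// with a 10s step being resized to a millisPerStep of 30000:
// seriesValuesPerStep is 30000/10000 = 3, so each group of up to three source
// values is folded together with stepAggregator via consolidateValues (NaNs
// are skipped), and the callback receives the timestamp of the first source
// step in the group along with the aggregated value. In the inverse case,
// where the series step is already larger than the requested step, the
// function falls through without invoking the callback.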
   366  
   367  // resized collects consolidated values; its appender method satisfies PostConsolidationFunc.
   368  type resized struct {
   369  	values []float64
   370  }
   371  
   372  // appender adds new values to resized.values.
   373  func (v *resized) appender(timestamp time.Time, value float64) {
   374  	v.values = append(v.values, value)
   375  }
   376  
   377  // IntersectAndResize returns a new time series with a different millisPerStep that spans the
   378  // intersection of the underlying timeseries and the provided start and end time parameters
   379  func (b *Series) IntersectAndResize(
   380  	start, end time.Time,
   381  	millisPerStep int,
   382  	stepAggregator ConsolidationFunc,
   383  ) (*Series, error) {
   384  	intersects, start, end := b.intersection(start, end)
   385  	if !intersects {
   386  		ts := NewSeries(b.ctx, b.name, start, &float64Values{
   387  			millisPerStep: millisPerStep,
   388  			values:        []float64{},
   389  			numSteps:      0,
   390  		})
   391  		ts.Specification = b.Specification
   392  		return ts, nil
   393  	}
   394  	if b.MillisPerStep() == millisPerStep {
   395  		return b.Slice(b.StepAtTime(start), b.StepAtTime(end))
   396  	}
   397  	return b.resized(start, end, millisPerStep, stepAggregator), nil
   398  }
   399  
   400  func (b *Series) resized(
   401  	start, end time.Time,
   402  	millisPerStep int,
   403  	stepAggregator ConsolidationFunc,
   404  ) *Series {
   405  	// TODO: This append based model completely screws pooling; need to rewrite to allow for pooling.
   406  	v := &resized{}
   407  	b.resizeStep(start, end, millisPerStep, stepAggregator, v.appender)
   408  	ts := NewSeries(b.ctx, b.name, start, &float64Values{
   409  		millisPerStep: millisPerStep,
   410  		values:        v.values,
   411  		numSteps:      len(v.values),
   412  	})
   413  	ts.Specification = b.Specification
   414  	return ts
   415  }
   416  
   417  // NeedsResizeToMaxDataPoints returns whether the series needs resizing to max datapoints.
   418  func (b *Series) NeedsResizeToMaxDataPoints(maxDataPoints int64) bool {
   419  	if maxDataPoints <= 0 {
   420  		// No max datapoints specified.
   421  		return false
   422  	}
   423  	return int64(b.Len()) > maxDataPoints
   424  }
   425  
   426  // ResizeToMaxDataPointsMillisPerStep returns the new milliseconds per step
   427  // required if the series needs resizing, along with true; if it does not
   428  // need resizing for max datapoints it returns 0 and false.
   429  func (b *Series) ResizeToMaxDataPointsMillisPerStep(
   430  	maxDataPoints int64,
   431  ) (int, bool) {
   432  	if !b.NeedsResizeToMaxDataPoints(maxDataPoints) {
   433  		return 0, false
   434  	}
   435  	samplingMultiplier := math.Ceil(float64(b.Len()) / float64(maxDataPoints))
   436  	return int(samplingMultiplier * float64(b.MillisPerStep())), true
   437  }
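
// A worked sketch with hypothetical numbers: for a series of 1,000 points at
// 10,000 ms per step and a maxDataPoints of 300, the sampling multiplier is
// ceil(1000/300) = 4, so this returns (40000, true); resizing at 40,000 ms
// per step yields 250 points, which fits under the limit.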
   438  
   439  // ResizeToMaxDataPoints resizes the series to fit max datapoints and returns
   440  // true if a series was resized or false if it did not need to be resized.
   441  func (b *Series) ResizeToMaxDataPoints(
   442  	maxDataPoints int64,
   443  	stepAggregator ConsolidationFunc,
   444  ) (*Series, bool) {
   445  	resizeMillisPerStep, needsResize := b.ResizeToMaxDataPointsMillisPerStep(maxDataPoints)
   446  	if !needsResize {
   447  		return nil, false
   448  	}
   449  	return b.resized(b.StartTime(), b.EndTime(), resizeMillisPerStep, stepAggregator), true
   450  }
   451  
   452  // A MutableSeries is a Series that allows updates
   453  type MutableSeries struct {
   454  	Series
   455  }
   456  
   457  // NewMutableSeries returns a new mutable Series at the
   458  // given start time and backed by the provided storage
   459  func NewMutableSeries(
   460  	ctx context.Context,
   461  	name string,
   462  	startTime time.Time,
   463  	vals MutableValues) *MutableSeries {
   464  	return &MutableSeries{
   465  		Series{
   466  			name:          name,
   467  			startTime:     startTime,
   468  			vals:          vals,
   469  			ctx:           ctx,
   470  			Specification: name,
   471  		},
   472  	}
   473  }
   474  
   475  // SetValueAt sets the value at the given step
   476  func (b *MutableSeries) SetValueAt(i int, v float64) {
   477  	b.vals.(MutableValues).SetValueAt(i, v)
   478  }
   479  
   480  // SetValueAtTime sets the value at the step containing the given time
   481  func (b *MutableSeries) SetValueAtTime(t time.Time, v float64) {
   482  	b.SetValueAt(b.StepAtTime(t), v)
   483  }
   484  
   485  // A Consolidation produces a Series whose values are the result of applying a consolidation
   486  // function to all of the datapoints that fall within each step.  It can be used to quantize raw
   487  // datapoints into a given resolution, for example, or to aggregate multiple timeseries at the
   488  // same or smaller resolutions.
   489  type Consolidation interface {
   490  	// AddDatapoint adds an individual datapoint to the consolidation.
   491  	AddDatapoint(timestamp time.Time, value float64)
   492  
   493  	// AddDatapoints adds a set of datapoints to the consolidation.
   494  	AddDatapoints(datapoints []Datapoint)
   495  
   496  	// AddSeries adds the datapoints for each series to the consolidation.  The
   497  	// stepAggregationFunc is used to combine values from the series if the series
   498  	// has a smaller step size than the consolidation.  For example, an application
   499  	// might want to produce a consolidation which is a minimum of the input timeseries,
   500  	// but where the values in smaller timeseries units are summed together to
   501  	// produce the value to which the consolidation applies.
   502  	// To put it another way, stepAggregationFunc is used by the series to resize itself
   503  	// rather than by the consolidation.
   504  	AddSeries(series *Series, stepAggregationFunc ConsolidationFunc)
   505  
   506  	// BuildSeries returns the consolidated Series and optionally finalizes
   507  	// the consolidation returning it to the pool
   508  	BuildSeries(id string, finalize FinalizeOption) *Series
   509  
   510  	// Finalize returns the consolidation to the pool
   511  	Finalize()
   512  }
   513  
   514  // FinalizeOption specifies the option to finalize or avoid finalizing
   515  type FinalizeOption int
   516  
   517  const (
   518  	// NoFinalize will avoid finalizing the subject
   519  	NoFinalize FinalizeOption = iota
   520  	// Finalize will finalize the subject
   521  	Finalize
   522  )
   523  
   524  // A ConsolidationFunc consolidates values at a given point in time.  It takes the current consolidated
   525  // value, the new value to add to the consolidation, and a count of the number of values that have
   526  // already been consolidated.
   527  type ConsolidationFunc func(existing, toAdd float64, count int) float64
   528  
   529  // NewConsolidation creates a new consolidation window.
   530  func NewConsolidation(
   531  	ctx context.Context,
   532  	start, end time.Time,
   533  	millisPerStep int,
   534  	cf ConsolidationFunc,
   535  ) Consolidation {
   536  	var (
   537  		numSteps = NumSteps(start, end, millisPerStep)
   538  		values   = NewValues(ctx, millisPerStep, numSteps)
   539  		c        *consolidation
   540  		pooled   = false
   541  	)
   542  
   543  	if consolidationPools != nil {
   544  		temp := consolidationPools.Get(numSteps)
   545  		c = temp.(*consolidation)
   546  		if cap(c.counts) >= numSteps {
   547  			c.counts = c.counts[:numSteps]
   548  			for i := range c.counts {
   549  				c.counts[i] = 0
   550  			}
   551  			pooled = true
   552  		}
   553  	}
   554  
   555  	if !pooled {
   556  		c = newConsolidation(numSteps)
   557  	}
   558  
   559  	c.ctx = ctx
   560  	c.start = start
   561  	c.end = end
   562  	c.millisPerStep = millisPerStep
   563  	c.values = values
   564  	c.f = cf
   565  
   566  	return c
   567  }
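
// A minimal usage sketch (ctx, start, end and inputSeries are assumed to be
// supplied by the caller): build a one-minute-resolution consolidation that
// keeps the maximum across series, where any finer-grained input series is
// first summed up to the one-minute step:
//
//	c := NewConsolidation(ctx, start, end, 60000, Max)
//	for _, s := range inputSeries {
//		c.AddSeries(s, Sum)
//	}
//	combined := c.BuildSeries("combined", Finalize)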
   568  
   569  func newConsolidation(numSteps int) *consolidation {
   570  	counts := make([]int, numSteps)
   571  	return &consolidation{
   572  		counts: counts,
   573  	}
   574  }
   575  
   576  type consolidation struct {
   577  	ctx           context.Context
   578  	start         time.Time
   579  	end           time.Time
   580  	millisPerStep int
   581  	values        MutableValues
   582  	counts        []int
   583  	f             ConsolidationFunc
   584  }
   585  
   586  func (c *consolidation) AddDatapoints(datapoints []Datapoint) {
   587  	for _, datapoint := range datapoints {
   588  		c.AddDatapoint(datapoint.Timestamp, datapoint.Value)
   589  	}
   590  }
   591  
   592  func (c *consolidation) AddDatapoint(timestamp time.Time, value float64) {
   593  	if timestamp.Before(c.start) || timestamp.After(c.end) {
   594  		return
   595  	}
   596  
   597  	if math.IsNaN(value) {
   598  		return
   599  	}
   600  
   601  	step := int(timestamp.UnixNano()/1000000-c.start.UnixNano()/1000000) / c.millisPerStep
   602  	if step >= c.values.Len() {
   603  		return
   604  	}
   605  
   606  	n, count := consolidateValues(c.values.ValueAt(step), value, c.counts[step], c.f)
   607  	c.counts[step] = count
   608  	c.values.SetValueAt(step, n)
   609  }
   610  
   611  func consolidateValues(current, value float64, count int, f ConsolidationFunc) (float64, int) {
   612  	if math.IsNaN(value) {
   613  		return current, count
   614  	}
   615  
   616  	if count == 0 {
   617  		return value, 1
   618  	}
   619  
   620  	return f(current, value, count), count + 1
   621  }
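
// As a sketch of how the running count drives aggregation, folding the values
// 2, 4 and 6 into an initially empty slot with Avg proceeds as follows:
// count 0 takes the value directly, giving (2, 1); then Avg(2, 4, 1) =
// (2*1+4)/2 = 3 with count 2; then Avg(3, 6, 2) = (3*2+6)/3 = 4 with count 3,
// the mean of the three inputs. NaN inputs leave both value and count
// untouched.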
   622  
   623  // AddSeries adds a time series to the consolidation; stepAggregator is used to resize the
   624  // provided timeseries if its step size is different from the consolidation's step size.
   625  func (c *consolidation) AddSeries(series *Series, stepAggregator ConsolidationFunc) {
   626  	if series.AllNaN() {
   627  		return
   628  	}
   629  
   630  	intersects, start, end := series.intersection(c.start, c.end)
   631  	if !intersects {
   632  		// Nothing to do.
   633  		return
   634  	}
   635  
   636  	if series.MillisPerStep() == c.millisPerStep {
   637  		// Series step size is identical to the consolidation: simply apply each series value to
   638  		// the consolidation.
   639  		startIndex := series.StepAtTime(start)
   640  		endIndex := int(math.Min(float64(series.StepAtTime(end)), float64(series.Len()-1)))
   641  		for n := startIndex; n <= endIndex; n++ {
   642  			c.AddDatapoint(series.StartTimeForStep(n), series.ValueAt(n))
   643  		}
   644  		return
   645  	}
   646  	series.resizeStep(start, end, c.millisPerStep, stepAggregator, c.AddDatapoint)
   647  }
   648  
   649  func (c *consolidation) BuildSeries(id string, f FinalizeOption) *Series {
   650  	series := NewSeries(c.ctx, id, c.start, c.values)
   651  	if f == Finalize {
   652  		c.Finalize()
   653  	}
   654  	return series
   655  }
   656  
   657  func (c *consolidation) Finalize() {
   658  	c.ctx = nil
   659  	c.start = time.Time{}
   660  	c.end = time.Time{}
   661  	c.millisPerStep = 0
   662  	c.values = nil
   663  	c.f = nil
   664  	if consolidationPools == nil {
   665  		return
   666  	}
   667  	consolidationPools.Put(c, cap(c.counts))
   668  }
   669  
   670  // NumSteps calculates the number of steps of a given size between two times.
   671  func NumSteps(start, end time.Time, millisPerStep int) int {
   672  	// We should round up.
   673  	numSteps := int(math.Ceil(float64(
   674  		end.Sub(start)/time.Millisecond) / float64(millisPerStep)))
   675  
   676  	if numSteps > 0 {
   677  		return numSteps
   678  	}
   679  
   680  	// Even for intervals less than millisPerStep, there should be at least one step.
   681  	return 1
   682  }
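
// For example, a 65s window at 10,000 ms per step yields ceil(65000/10000) =
// 7 steps, while a 5s window at the same resolution yields ceil(0.5) = 1; an
// empty or inverted window still reports a single step.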
   683  
   684  // Sum sums two values.
   685  func Sum(a, b float64, count int) float64 { return a + b }
   686  
   687  // Mul multiplies two values.
   688  func Mul(a, b float64, count int) float64 { return a * b }
   689  
   690  // Avg produces a running average.
   691  func Avg(a, b float64, count int) float64 { return (a*float64(count) + b) / float64(count+1) }
   692  
   693  // Min finds the min of two values.
   694  func Min(a, b float64, count int) float64 { return math.Min(a, b) }
   695  
   696  // Max finds the max of two values.
   697  func Max(a, b float64, count int) float64 { return math.Max(a, b) }
   698  
   699  // Last finds the latter of two values.
   700  func Last(a, b float64, count int) float64 { return b }
   701  
   702  // Pow returns the first value to the power of the second value
   703  func Pow(a, b float64, count int) float64 { return math.Pow(a, b) }
   704  
   705  // Median finds the median of a slice of values.
   706  func Median(vals []float64, count int) float64 {
   707  	if count < 1 {
   708  		return math.NaN()
   709  	}
   710  	if count == 1 {
   711  		return vals[0]
   712  	}
   713  	sort.Float64s(vals)
   714  	if count%2 != 0 {
   715  		// if count is odd
   716  		return vals[(count-1)/2]
   717  	}
   718  	// if count is even
   719  	return (vals[count/2] + vals[(count/2)-1]) / 2.0
   720  }
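
// For example, Median([]float64{5, 1, 3}, 3) sorts the slice to [1, 3, 5] and
// returns 3, while Median([]float64{4, 1, 2, 3}, 4) returns (2+3)/2 = 2.5.
// Note that the input slice is sorted in place.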
   721  
   722  // Gcd finds the gcd of two values.
   723  func Gcd(a, b int64) int64 {
   724  	if a < 0 {
   725  		a = -a
   726  	}
   727  
   728  	if b < 0 {
   729  		b = -b
   730  	}
   731  
   732  	if b == 0 {
   733  		return a
   734  	}
   735  
   736  	return Gcd(b, a%b)
   737  }
   738  
   739  // Lcm finds the lcm of two values.
   740  func Lcm(a, b int64) int64 {
   741  	if a < 0 {
   742  		a = -a
   743  	}
   744  
   745  	if b < 0 {
   746  		b = -b
   747  	}
   748  
   749  	if a == b {
   750  		return a
   751  	}
   752  
   753  	if a < b {
   754  		a, b = b, a
   755  	}
   756  
   757  	return a / Gcd(a, b) * b
   758  }
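
// For example, Gcd(12, 18) = 6 and Lcm(4, 6) = 12; negative inputs are
// normalized first, so Gcd(-8, 12) is also 4.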
   759  
   760  // A SeriesList is a list of series.
   761  type SeriesList struct {
   762  	// Values is the list of series.
   763  	Values []*Series
   764  	// SortApplied specifies whether a specific sort order has been applied.
   765  	SortApplied bool
   766  	// Metadata contains any additional metadata indicating information about
   767  	// series execution.
   768  	Metadata block.ResultMetadata
   769  }
   770  
   771  // NewSeriesList creates a blank series list.
   772  func NewSeriesList() SeriesList {
   773  	return SeriesList{Metadata: block.NewResultMetadata()}
   774  }
   775  
   776  // NewSeriesListWithSeries creates a series list with the given series and
   777  // default metadata.
   778  func NewSeriesListWithSeries(values ...*Series) SeriesList {
   779  	return SeriesList{
   780  		Values:   values,
   781  		Metadata: block.NewResultMetadata(),
   782  	}
   783  }
   784  
   785  // Len returns the length of the list.
   786  func (l SeriesList) Len() int {
   787  	return len(l.Values)
   788  }