github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/sem/builtins/aggregate_builtins.go

     1  // Copyright 2015 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package builtins
    12  
    13  import (
    14  	"bytes"
    15  	"context"
    16  	"fmt"
    17  	"math"
    18  	"time"
    19  	"unsafe"
    20  
    21  	"github.com/cockroachdb/apd"
    22  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
    23  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
    24  	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    25  	"github.com/cockroachdb/cockroach/pkg/sql/types"
    26  	"github.com/cockroachdb/cockroach/pkg/util/arith"
    27  	"github.com/cockroachdb/cockroach/pkg/util/bitarray"
    28  	"github.com/cockroachdb/cockroach/pkg/util/duration"
    29  	"github.com/cockroachdb/cockroach/pkg/util/json"
    30  	"github.com/cockroachdb/cockroach/pkg/util/mon"
    31  	"github.com/cockroachdb/errors"
    32  )
    33  
    34  func initAggregateBuiltins() {
    35  	// Add all aggregates to the Builtins map after a few sanity checks.
    36  	for k, v := range aggregates {
    37  		if _, exists := builtins[k]; exists {
    38  			panic("duplicate builtin: " + k)
    39  		}
    40  
    41  		if !v.props.Impure {
    42  			panic(fmt.Sprintf("%s: aggregate functions should all be impure, found %v", k, v))
    43  		}
    44  		if v.props.Class != tree.AggregateClass {
    45  			panic(fmt.Sprintf("%s: aggregate functions should be marked with the tree.AggregateClass "+
    46  				"function class, found %v", k, v))
    47  		}
    48  		for _, a := range v.overloads {
    49  			if a.AggregateFunc == nil {
    50  				panic(fmt.Sprintf("%s: aggregate functions should have tree.AggregateFunc constructors, "+
    51  					"found %v", k, a))
    52  			}
    53  			if a.WindowFunc == nil {
    54  				panic(fmt.Sprintf("%s: aggregate functions should have tree.WindowFunc constructors, "+
    55  					"found %v", k, a))
    56  			}
    57  		}
    58  
    59  		builtins[k] = v
    60  	}
    61  }
    62  
    63  func aggProps() tree.FunctionProperties {
    64  	return tree.FunctionProperties{Class: tree.AggregateClass, Impure: true}
    65  }
    66  
    67  func aggPropsNullableArgs() tree.FunctionProperties {
    68  	f := aggProps()
    69  	f.NullableArgs = true
    70  	return f
    71  }
    72  
    73  // allMaxMinAggregateTypes contains all types that the max/min aggregate
    74  // functions are defined on: types.Scalar plus extras such as collated strings and enums.
    75  var allMaxMinAggregateTypes = append(
    76  	[]*types.T{types.AnyCollatedString, types.AnyEnum},
    77  	types.Scalar...,
    78  )
    79  
    80  // aggregates are a special class of builtin functions that are wrapped
    81  // at execution in a bucketing layer to combine (aggregate) the result
    82  // of the function being run over many rows.
    83  //
    84  // See `aggregateFuncHolder` in the sql package.
    85  //
    86  // In particular they must not be simplified during normalization
    87  // (and thus must be marked as impure), even when they are given a
    88  // constant argument (e.g. SUM(1)). This is because aggregate
    89  // functions must return NULL when there are no rows in the source
    90  // table, so their evaluation must always be delayed until query
    91  // execution.
    92  //
    93  // Some aggregate functions must handle nullable arguments, since normalizing
    94  // an aggregate function call to NULL in the presence of a NULL argument may
    95  // not be correct. There are two cases where an aggregate function must handle
    96  // nullable arguments:
    97  // 1) the aggregate function does not skip NULLs (e.g., ARRAY_AGG); and
    98  // 2) the aggregate function does not return NULL when it aggregates no rows
    99  //		(e.g., COUNT).
   100  //
   101  // For use in other packages, see AllAggregateBuiltinNames and
   102  // GetBuiltinProperties().
   103  // These functions are also identified with Class == tree.AggregateClass.
   104  // The properties are reachable via tree.FunctionDefinition.
   105  var aggregates = map[string]builtinDefinition{
   106  	"array_agg": setProps(aggPropsNullableArgs(),
   107  		arrayBuiltin(func(t *types.T) tree.Overload {
   108  			return makeAggOverloadWithReturnType(
   109  				[]*types.T{t},
   110  				func(args []tree.TypedExpr) *types.T {
   111  					if len(args) == 0 {
   112  						return types.MakeArray(t)
   113  					}
   114  					// Whenever possible, use the expression's type, so we can properly
   115  					// handle aliased types that don't explicitly have overloads.
   116  					return types.MakeArray(args[0].ResolvedType())
   117  				},
   118  				newArrayAggregate,
   119  				"Aggregates the selected values into an array.",
   120  				tree.VolatilityImmutable,
   121  			)
   122  		})),
   123  
   124  	"avg": makeBuiltin(aggProps(),
   125  		makeAggOverload([]*types.T{types.Int}, types.Decimal, newIntAvgAggregate,
   126  			"Calculates the average of the selected values.", tree.VolatilityImmutable),
   127  		makeAggOverload([]*types.T{types.Float}, types.Float, newFloatAvgAggregate,
   128  			"Calculates the average of the selected values.", tree.VolatilityImmutable),
   129  		makeAggOverload([]*types.T{types.Decimal}, types.Decimal, newDecimalAvgAggregate,
   130  			"Calculates the average of the selected values.", tree.VolatilityImmutable),
   131  		makeAggOverload([]*types.T{types.Interval}, types.Interval, newIntervalAvgAggregate,
   132  			"Calculates the average of the selected values.", tree.VolatilityImmutable),
   133  	),
   134  
   135  	"bit_and": makeBuiltin(aggProps(),
   136  		makeAggOverload([]*types.T{types.Int}, types.Int, newIntBitAndAggregate,
   137  			"Calculates the bitwise AND of all non-null input values, or null if none.", tree.VolatilityImmutable),
   138  		makeAggOverload([]*types.T{types.VarBit}, types.VarBit, newBitBitAndAggregate,
   139  			"Calculates the bitwise AND of all non-null input values, or null if none.", tree.VolatilityImmutable),
   140  	),
   141  
   142  	"bit_or": makeBuiltin(aggProps(),
   143  		makeAggOverload([]*types.T{types.Int}, types.Int, newIntBitOrAggregate,
   144  			"Calculates the bitwise OR of all non-null input values, or null if none.", tree.VolatilityImmutable),
   145  		makeAggOverload([]*types.T{types.VarBit}, types.VarBit, newBitBitOrAggregate,
   146  			"Calculates the bitwise OR of all non-null input values, or null if none.", tree.VolatilityImmutable),
   147  	),
   148  
   149  	"bool_and": makeBuiltin(aggProps(),
   150  		makeAggOverload([]*types.T{types.Bool}, types.Bool, newBoolAndAggregate,
   151  			"Calculates the boolean value of `AND`ing all selected values.", tree.VolatilityImmutable),
   152  	),
   153  
   154  	"bool_or": makeBuiltin(aggProps(),
   155  		makeAggOverload([]*types.T{types.Bool}, types.Bool, newBoolOrAggregate,
   156  			"Calculates the boolean value of `OR`ing all selected values.", tree.VolatilityImmutable),
   157  	),
   158  
   159  	"concat_agg": makeBuiltin(aggProps(),
   160  		makeAggOverload([]*types.T{types.String}, types.String, newStringConcatAggregate,
   161  			"Concatenates all selected values.", tree.VolatilityImmutable),
   162  		makeAggOverload([]*types.T{types.Bytes}, types.Bytes, newBytesConcatAggregate,
   163  			"Concatenates all selected values.", tree.VolatilityImmutable),
   164  		// TODO(eisen): support collated strings when the type system properly
   165  		// supports parametric types.
   166  	),
   167  
   168  	"corr": makeBuiltin(aggProps(),
   169  		makeAggOverload([]*types.T{types.Float, types.Float}, types.Float, newCorrAggregate,
   170  			"Calculates the correlation coefficient of the selected values.", tree.VolatilityImmutable),
   171  		makeAggOverload([]*types.T{types.Int, types.Int}, types.Float, newCorrAggregate,
   172  			"Calculates the correlation coefficient of the selected values.", tree.VolatilityImmutable),
   173  		makeAggOverload([]*types.T{types.Float, types.Int}, types.Float, newCorrAggregate,
   174  			"Calculates the correlation coefficient of the selected values.", tree.VolatilityImmutable),
   175  		makeAggOverload([]*types.T{types.Int, types.Float}, types.Float, newCorrAggregate,
   176  			"Calculates the correlation coefficient of the selected values.", tree.VolatilityImmutable),
   177  	),
   178  
   179  	"count": makeBuiltin(aggPropsNullableArgs(),
   180  		makeAggOverload([]*types.T{types.Any}, types.Int, newCountAggregate,
   181  			"Calculates the number of selected elements.", tree.VolatilityImmutable),
   182  	),
   183  
   184  	"count_rows": makeBuiltin(aggProps(),
   185  		tree.Overload{
   186  			Types:         tree.ArgTypes{},
   187  			ReturnType:    tree.FixedReturnType(types.Int),
   188  			AggregateFunc: newCountRowsAggregate,
   189  			WindowFunc: func(params []*types.T, evalCtx *tree.EvalContext) tree.WindowFunc {
   190  				return newFramableAggregateWindow(
   191  					newCountRowsAggregate(params, evalCtx, nil /* arguments */),
   192  					func(evalCtx *tree.EvalContext, arguments tree.Datums) tree.AggregateFunc {
   193  						return newCountRowsAggregate(params, evalCtx, arguments)
   194  					},
   195  				)
   196  			},
   197  			Info:       "Calculates the number of rows.",
   198  			Volatility: tree.VolatilityImmutable,
   199  		},
   200  	),
   201  
   202  	"every": makeBuiltin(aggProps(),
   203  		makeAggOverload([]*types.T{types.Bool}, types.Bool, newBoolAndAggregate,
   204  			"Calculates the boolean value of `AND`ing all selected values.", tree.VolatilityImmutable),
   205  	),
   206  
   207  	"max": collectOverloads(aggProps(), allMaxMinAggregateTypes,
   208  		func(t *types.T) tree.Overload {
   209  			info := "Identifies the maximum selected value."
   210  			vol := tree.VolatilityImmutable
   211  			// If t is an ambiguous type (like AnyCollatedString), then our aggregate
   212  			// does not have a fixed return type.
   213  			if t.IsAmbiguous() {
   214  				return makeAggOverloadWithReturnType(
   215  					[]*types.T{t}, tree.FirstNonNullReturnType(), newMaxAggregate, info, vol,
   216  				)
   217  			}
   218  			return makeAggOverload([]*types.T{t}, t, newMaxAggregate, info, vol)
   219  		}),
   220  
   221  	"min": collectOverloads(aggProps(), allMaxMinAggregateTypes,
   222  		func(t *types.T) tree.Overload {
   223  			info := "Identifies the minimum selected value."
   224  			vol := tree.VolatilityImmutable
   225  			// If t is an ambiguous type (like AnyCollatedString), then our aggregate
   226  			// does not have a fixed return type.
   227  			if t.IsAmbiguous() {
   228  				return makeAggOverloadWithReturnType(
   229  					[]*types.T{t}, tree.FirstNonNullReturnType(), newMinAggregate, info, vol,
   230  				)
   231  			}
   232  			return makeAggOverload([]*types.T{t}, t, newMinAggregate, info, vol)
   233  		}),
   234  
   235  	"string_agg": makeBuiltin(aggPropsNullableArgs(),
   236  		makeAggOverload([]*types.T{types.String, types.String}, types.String, newStringConcatAggregate,
   237  			"Concatenates all selected values using the provided delimiter.", tree.VolatilityImmutable),
   238  		makeAggOverload([]*types.T{types.Bytes, types.Bytes}, types.Bytes, newBytesConcatAggregate,
   239  			"Concatenates all selected values using the provided delimiter.", tree.VolatilityImmutable),
   240  	),
   241  
   242  	"sum_int": makeBuiltin(aggProps(),
   243  		makeAggOverload([]*types.T{types.Int}, types.Int, newSmallIntSumAggregate,
   244  			"Calculates the sum of the selected values.", tree.VolatilityImmutable),
   245  	),
   246  
   247  	"sum": makeBuiltin(aggProps(),
   248  		makeAggOverload([]*types.T{types.Int}, types.Decimal, newIntSumAggregate,
   249  			"Calculates the sum of the selected values.", tree.VolatilityImmutable),
   250  		makeAggOverload([]*types.T{types.Float}, types.Float, newFloatSumAggregate,
   251  			"Calculates the sum of the selected values.", tree.VolatilityImmutable),
   252  		makeAggOverload([]*types.T{types.Decimal}, types.Decimal, newDecimalSumAggregate,
   253  			"Calculates the sum of the selected values.", tree.VolatilityImmutable),
   254  		makeAggOverload([]*types.T{types.Interval}, types.Interval, newIntervalSumAggregate,
   255  			"Calculates the sum of the selected values.", tree.VolatilityImmutable),
   256  	),
   257  
   258  	"sqrdiff": makeBuiltin(aggProps(),
   259  		makeAggOverload([]*types.T{types.Int}, types.Decimal, newIntSqrDiffAggregate,
   260  			"Calculates the sum of squared differences from the mean of the selected values.", tree.VolatilityImmutable),
   261  		makeAggOverload([]*types.T{types.Decimal}, types.Decimal, newDecimalSqrDiffAggregate,
   262  			"Calculates the sum of squared differences from the mean of the selected values.", tree.VolatilityImmutable),
   263  		makeAggOverload([]*types.T{types.Float}, types.Float, newFloatSqrDiffAggregate,
   264  			"Calculates the sum of squared differences from the mean of the selected values.", tree.VolatilityImmutable),
   265  	),
   266  
   267  	// final_(variance|stddev) computes the global (variance|standard deviation)
   268  	// from an arbitrary collection of local sums of squared differences from the
   269  	// mean (see the standalone merge sketch after this map). Adapted from
   270  	// https://www.johndcook.com/blog/skewness_kurtosis and https://github.com/cockroachdb/cockroach/pull/17728.
   271  
   272  	// TODO(knz): The 3-argument final_variance and final_stddev are
   273  	// only defined for internal use by distributed aggregations. They
   274  	// are marked as "private" so as to not trigger panics from issue
   275  	// #10495.
   276  
   277  	// The input signature is: SQDIFF, SUM, COUNT
   278  	"final_variance": makePrivate(makeBuiltin(aggProps(),
   279  		makeAggOverload(
   280  			[]*types.T{types.Decimal, types.Decimal, types.Int},
   281  			types.Decimal,
   282  			newDecimalFinalVarianceAggregate,
   283  			"Calculates the variance from the selected locally-computed squared difference values.",
   284  			tree.VolatilityImmutable,
   285  		),
   286  		makeAggOverload(
   287  			[]*types.T{types.Float, types.Float, types.Int},
   288  			types.Float,
   289  			newFloatFinalVarianceAggregate,
   290  			"Calculates the variance from the selected locally-computed squared difference values.",
   291  			tree.VolatilityImmutable,
   292  		),
   293  	)),
   294  
   295  	"final_stddev": makePrivate(makeBuiltin(aggProps(),
   296  		makeAggOverload(
   297  			[]*types.T{types.Decimal,
   298  				types.Decimal, types.Int},
   299  			types.Decimal,
   300  			newDecimalFinalStdDevAggregate,
   301  			"Calculates the standard deviation from the selected locally-computed squared difference values.",
   302  			tree.VolatilityImmutable,
   303  		),
   304  		makeAggOverload(
   305  			[]*types.T{types.Float, types.Float, types.Int},
   306  			types.Float,
   307  			newFloatFinalStdDevAggregate,
   308  			"Calculates the standard deviation from the selected locally-computed squared difference values.",
   309  			tree.VolatilityImmutable,
   310  		),
   311  	)),
   312  
   313  	// variance is a historical alias for var_samp.
   314  	"variance": makeVarianceBuiltin(),
   315  	"var_samp": makeVarianceBuiltin(),
   316  
   317  	// stddev is a historical alias for stddev_samp.
   318  	"stddev":      makeStdDevBuiltin(),
   319  	"stddev_samp": makeStdDevBuiltin(),
   320  
   321  	"xor_agg": makeBuiltin(aggProps(),
   322  		makeAggOverload([]*types.T{types.Bytes}, types.Bytes, newBytesXorAggregate,
   323  			"Calculates the bitwise XOR of the selected values.", tree.VolatilityImmutable),
   324  		makeAggOverload([]*types.T{types.Int}, types.Int, newIntXorAggregate,
   325  			"Calculates the bitwise XOR of the selected values.", tree.VolatilityImmutable),
   326  	),
   327  
   328  	"json_agg": makeBuiltin(aggPropsNullableArgs(),
   329  		makeAggOverload([]*types.T{types.Any}, types.Jsonb, newJSONAggregate,
   330  			"Aggregates values as a JSON or JSONB array.", tree.VolatilityStable),
   331  	),
   332  
   333  	"jsonb_agg": makeBuiltin(aggPropsNullableArgs(),
   334  		makeAggOverload([]*types.T{types.Any}, types.Jsonb, newJSONAggregate,
   335  			"Aggregates values as a JSON or JSONB array.", tree.VolatilityStable),
   336  	),
   337  
   338  	"json_object_agg":  makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 33285, Class: tree.AggregateClass, Impure: true}),
   339  	"jsonb_object_agg": makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 33285, Class: tree.AggregateClass, Impure: true}),
   340  
   341  	AnyNotNull: makePrivate(makeBuiltin(aggProps(),
   342  		makeAggOverloadWithReturnType(
   343  			[]*types.T{types.Any},
   344  			tree.IdentityReturnType(0),
   345  			newAnyNotNullAggregate,
   346  			"Returns an arbitrary not-NULL value, or NULL if none exists.",
   347  			tree.VolatilityImmutable,
   348  		))),
   349  
   350  	// Ordered-set aggregations.
   351  	"percentile_disc": makeBuiltin(aggProps(),
   352  		makeAggOverloadWithReturnType(
   353  			[]*types.T{types.Float},
   354  			func(args []tree.TypedExpr) *types.T { return tree.UnknownReturnType },
   355  			builtinMustNotRun,
   356  			"Discrete percentile: returns the first input value whose position in the ordering equals or "+
   357  				"exceeds the specified fraction.",
   358  			tree.VolatilityImmutable),
   359  		makeAggOverloadWithReturnType(
   360  			[]*types.T{types.FloatArray},
   361  			func(args []tree.TypedExpr) *types.T { return tree.UnknownReturnType },
   362  			builtinMustNotRun,
   363  			"Discrete percentile: returns input values whose position in the ordering equals or "+
   364  				"exceeds the specified fractions.",
   365  			tree.VolatilityImmutable),
   366  	),
   367  	"percentile_disc_impl": makePrivate(collectOverloads(aggProps(), types.Scalar,
   368  		func(t *types.T) tree.Overload {
   369  			return makeAggOverload([]*types.T{types.Float, t}, t, newPercentileDiscAggregate,
   370  				"Implementation of percentile_disc.",
   371  				tree.VolatilityImmutable)
   372  		},
   373  		func(t *types.T) tree.Overload {
   374  			return makeAggOverload([]*types.T{types.FloatArray, t}, types.MakeArray(t), newPercentileDiscAggregate,
   375  				"Implementation of percentile_disc.",
   376  				tree.VolatilityImmutable)
   377  		},
   378  	)),
   379  	"percentile_cont": makeBuiltin(aggProps(),
   380  		makeAggOverload(
   381  			[]*types.T{types.Float},
   382  			types.Float,
   383  			builtinMustNotRun,
   384  			"Continuous percentile: returns a float corresponding to the specified fraction in the ordering, "+
   385  				"interpolating between adjacent input floats if needed.",
   386  			tree.VolatilityImmutable),
   387  		makeAggOverload(
   388  			[]*types.T{types.Float},
   389  			types.Interval,
   390  			builtinMustNotRun,
   391  			"Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, "+
   392  				"interpolating between adjacent input intervals if needed.",
   393  			tree.VolatilityImmutable),
   394  		makeAggOverload(
   395  			[]*types.T{types.FloatArray},
   396  			types.MakeArray(types.Float),
   397  			builtinMustNotRun,
   398  			"Continuous percentile: returns floats corresponding to the specified fractions in the ordering, "+
   399  				"interpolating between adjacent input floats if needed.",
   400  			tree.VolatilityImmutable),
   401  		makeAggOverload(
   402  			[]*types.T{types.FloatArray},
   403  			types.MakeArray(types.Interval),
   404  			builtinMustNotRun,
   405  			"Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, "+
   406  				"interpolating between adjacent input intervals if needed.",
   407  			tree.VolatilityImmutable),
   408  	),
   409  	"percentile_cont_impl": makePrivate(makeBuiltin(aggProps(),
   410  		makeAggOverload(
   411  			[]*types.T{types.Float, types.Float},
   412  			types.Float,
   413  			newPercentileContAggregate,
   414  			"Implementation of percentile_cont.",
   415  			tree.VolatilityImmutable),
   416  		makeAggOverload(
   417  			[]*types.T{types.Float, types.Interval},
   418  			types.Interval,
   419  			newPercentileContAggregate,
   420  			"Implementation of percentile_cont.",
   421  			tree.VolatilityImmutable),
   422  		makeAggOverload(
   423  			[]*types.T{types.FloatArray, types.Float},
   424  			types.MakeArray(types.Float),
   425  			newPercentileContAggregate,
   426  			"Implementation of percentile_cont.",
   427  			tree.VolatilityImmutable),
   428  		makeAggOverload(
   429  			[]*types.T{types.FloatArray, types.Interval},
   430  			types.MakeArray(types.Interval),
   431  			newPercentileContAggregate,
   432  			"Implementation of percentile_cont.",
   433  			tree.VolatilityImmutable),
   434  	)),
   435  }
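
        // The final_variance and final_stddev builtins above merge per-node partial
        // states of the form (sqdiff, sum, count). What follows is a minimal
        // standalone sketch of that merge step (the pairwise combination formula
        // from the references cited above) and of the final sample-variance
        // computation; the example* names are illustrative and are not part of the
        // production code path.
        func exampleMergeSqrDiff(
        	sqA, sumA float64, nA int64, sqB, sumB float64, nB int64,
        ) (sqdiff, sum float64, n int64) {
        	if nA == 0 {
        		return sqB, sumB, nB
        	}
        	if nB == 0 {
        		return sqA, sumA, nA
        	}
        	// delta is the difference between the two partitions' means.
        	delta := sumB/float64(nB) - sumA/float64(nA)
        	n = nA + nB
        	sqdiff = sqA + sqB + delta*delta*float64(nA)*float64(nB)/float64(n)
        	return sqdiff, sumA + sumB, n
        }

        // exampleFinalVariance mirrors what final_variance produces from a merged
        // (sqdiff, _, count) state: the sample variance (variance is an alias for
        // var_samp above), or NULL (ok=false) when fewer than two rows were seen.
        func exampleFinalVariance(sqdiff float64, n int64) (v float64, ok bool) {
        	if n < 2 {
        		return 0, false
        	}
        	return sqdiff / float64(n-1), true
        }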
   436  
   437  // AnyNotNull is the name of the aggregate returned by NewAnyNotNullAggregate.
   438  const AnyNotNull = "any_not_null"
   439  
   440  func makePrivate(b builtinDefinition) builtinDefinition {
   441  	b.props.Private = true
   442  	return b
   443  }
   444  
   445  func makeAggOverload(
   446  	in []*types.T,
   447  	ret *types.T,
   448  	f func([]*types.T, *tree.EvalContext, tree.Datums) tree.AggregateFunc,
   449  	info string,
   450  	volatility tree.Volatility,
   451  ) tree.Overload {
   452  	return makeAggOverloadWithReturnType(
   453  		in,
   454  		tree.FixedReturnType(ret),
   455  		f,
   456  		info,
   457  		volatility,
   458  	)
   459  }
   460  
   461  func makeAggOverloadWithReturnType(
   462  	in []*types.T,
   463  	retType tree.ReturnTyper,
   464  	f func([]*types.T, *tree.EvalContext, tree.Datums) tree.AggregateFunc,
   465  	info string,
   466  	volatility tree.Volatility,
   467  ) tree.Overload {
   468  	argTypes := make(tree.ArgTypes, len(in))
   469  	for i, typ := range in {
   470  		argTypes[i].Name = fmt.Sprintf("arg%d", i+1)
   471  		argTypes[i].Typ = typ
   472  	}
   473  
   474  	return tree.Overload{
   475  		// See the comment about aggregate functions at the top of the
   476  		// aggregates map above.
   477  		Types:         argTypes,
   478  		ReturnType:    retType,
   479  		AggregateFunc: f,
   480  		WindowFunc: func(params []*types.T, evalCtx *tree.EvalContext) tree.WindowFunc {
   481  			aggWindowFunc := f(params, evalCtx, nil /* arguments */)
   482  			switch w := aggWindowFunc.(type) {
   483  			case *minAggregate:
   484  				min := &slidingWindowFunc{}
   485  				min.sw = makeSlidingWindow(evalCtx, func(evalCtx *tree.EvalContext, a, b tree.Datum) int {
   486  					return -a.Compare(evalCtx, b)
   487  				})
   488  				return min
   489  			case *maxAggregate:
   490  				max := &slidingWindowFunc{}
   491  				max.sw = makeSlidingWindow(evalCtx, func(evalCtx *tree.EvalContext, a, b tree.Datum) int {
   492  					return a.Compare(evalCtx, b)
   493  				})
   494  				return max
   495  			case *intSumAggregate:
   496  				return newSlidingWindowSumFunc(aggWindowFunc)
   497  			case *decimalSumAggregate:
   498  				return newSlidingWindowSumFunc(aggWindowFunc)
   499  			case *floatSumAggregate:
   500  				return newSlidingWindowSumFunc(aggWindowFunc)
   501  			case *intervalSumAggregate:
   502  				return newSlidingWindowSumFunc(aggWindowFunc)
   503  			case *avgAggregate:
   504  				// w.agg is a sum aggregate.
   505  				return &avgWindowFunc{sum: newSlidingWindowSumFunc(w.agg)}
   506  			}
   507  
   508  			return newFramableAggregateWindow(
   509  				aggWindowFunc,
   510  				func(evalCtx *tree.EvalContext, arguments tree.Datums) tree.AggregateFunc {
   511  					return f(params, evalCtx, arguments)
   512  				},
   513  			)
   514  		},
   515  		Info:       info,
   516  		Volatility: volatility,
   517  	}
   518  }
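
        // For min and max, the WindowFunc above swaps the plain aggregate for a
        // sliding-window implementation so that a moving window frame does not have
        // to recompute the extremum from scratch. Below is a self-contained sketch
        // of the general technique (a monotonic deque of candidate maxima); the
        // example* names are illustrative, and the makeSlidingWindow helper used
        // above has its own comparator-based implementation.
        type exampleSlidingMax struct {
        	values []float64
        	// deque holds indices of candidate maxima; the values at those indices
        	// are strictly decreasing from front to back, so the front is always the
        	// maximum of the current window.
        	deque []int
        }

        // add appends a value and evicts candidates that can never be the maximum
        // again because a value at least as large arrived after them.
        func (s *exampleSlidingMax) add(v float64) {
        	idx := len(s.values)
        	s.values = append(s.values, v)
        	for len(s.deque) > 0 && s.values[s.deque[len(s.deque)-1]] <= v {
        		s.deque = s.deque[:len(s.deque)-1]
        	}
        	s.deque = append(s.deque, idx)
        }

        // removeBefore evicts candidates that fell out of a window starting at start.
        func (s *exampleSlidingMax) removeBefore(start int) {
        	for len(s.deque) > 0 && s.deque[0] < start {
        		s.deque = s.deque[1:]
        	}
        }

        // max returns the maximum of the current window, or ok=false if it is empty.
        func (s *exampleSlidingMax) max() (float64, bool) {
        	if len(s.deque) == 0 {
        		return 0, false
        	}
        	return s.values[s.deque[0]], true
        }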
   519  
   520  func makeStdDevBuiltin() builtinDefinition {
   521  	return makeBuiltin(aggProps(),
   522  		makeAggOverload([]*types.T{types.Int}, types.Decimal, newIntStdDevAggregate,
   523  			"Calculates the standard deviation of the selected values.", tree.VolatilityImmutable),
   524  		makeAggOverload([]*types.T{types.Decimal}, types.Decimal, newDecimalStdDevAggregate,
   525  			"Calculates the standard deviation of the selected values.", tree.VolatilityImmutable),
   526  		makeAggOverload([]*types.T{types.Float}, types.Float, newFloatStdDevAggregate,
   527  			"Calculates the standard deviation of the selected values.", tree.VolatilityImmutable),
   528  	)
   529  }
   530  
   531  func makeVarianceBuiltin() builtinDefinition {
   532  	return makeBuiltin(aggProps(),
   533  		makeAggOverload([]*types.T{types.Int}, types.Decimal, newIntVarianceAggregate,
   534  			"Calculates the variance of the selected values.", tree.VolatilityImmutable),
   535  		makeAggOverload([]*types.T{types.Decimal}, types.Decimal, newDecimalVarianceAggregate,
   536  			"Calculates the variance of the selected values.", tree.VolatilityImmutable),
   537  		makeAggOverload([]*types.T{types.Float}, types.Float, newFloatVarianceAggregate,
   538  			"Calculates the variance of the selected values.", tree.VolatilityImmutable),
   539  	)
   540  }
   541  
   542  // builtinMustNotRun panics and indicates that a builtin cannot be run.
   543  func builtinMustNotRun(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
   544  	panic("builtin must be overridden and cannot be run directly")
   545  }
   546  
   547  var _ tree.AggregateFunc = &arrayAggregate{}
   548  var _ tree.AggregateFunc = &avgAggregate{}
   549  var _ tree.AggregateFunc = &corrAggregate{}
   550  var _ tree.AggregateFunc = &countAggregate{}
   551  var _ tree.AggregateFunc = &countRowsAggregate{}
   552  var _ tree.AggregateFunc = &maxAggregate{}
   553  var _ tree.AggregateFunc = &minAggregate{}
   554  var _ tree.AggregateFunc = &smallIntSumAggregate{}
   555  var _ tree.AggregateFunc = &intSumAggregate{}
   556  var _ tree.AggregateFunc = &decimalSumAggregate{}
   557  var _ tree.AggregateFunc = &floatSumAggregate{}
   558  var _ tree.AggregateFunc = &intervalSumAggregate{}
   559  var _ tree.AggregateFunc = &intSqrDiffAggregate{}
   560  var _ tree.AggregateFunc = &floatSqrDiffAggregate{}
   561  var _ tree.AggregateFunc = &decimalSqrDiffAggregate{}
   562  var _ tree.AggregateFunc = &floatSumSqrDiffsAggregate{}
   563  var _ tree.AggregateFunc = &decimalSumSqrDiffsAggregate{}
   564  var _ tree.AggregateFunc = &floatVarianceAggregate{}
   565  var _ tree.AggregateFunc = &decimalVarianceAggregate{}
   566  var _ tree.AggregateFunc = &floatStdDevAggregate{}
   567  var _ tree.AggregateFunc = &decimalStdDevAggregate{}
   568  var _ tree.AggregateFunc = &anyNotNullAggregate{}
   569  var _ tree.AggregateFunc = &concatAggregate{}
   570  var _ tree.AggregateFunc = &boolAndAggregate{}
   571  var _ tree.AggregateFunc = &boolOrAggregate{}
   572  var _ tree.AggregateFunc = &bytesXorAggregate{}
   573  var _ tree.AggregateFunc = &intXorAggregate{}
   574  var _ tree.AggregateFunc = &jsonAggregate{}
   575  var _ tree.AggregateFunc = &intBitAndAggregate{}
   576  var _ tree.AggregateFunc = &bitBitAndAggregate{}
   577  var _ tree.AggregateFunc = &intBitOrAggregate{}
   578  var _ tree.AggregateFunc = &bitBitOrAggregate{}
   579  var _ tree.AggregateFunc = &percentileDiscAggregate{}
   580  var _ tree.AggregateFunc = &percentileContAggregate{}
   581  
   582  const sizeOfArrayAggregate = int64(unsafe.Sizeof(arrayAggregate{}))
   583  const sizeOfAvgAggregate = int64(unsafe.Sizeof(avgAggregate{}))
   584  const sizeOfCorrAggregate = int64(unsafe.Sizeof(corrAggregate{}))
   585  const sizeOfCountAggregate = int64(unsafe.Sizeof(countAggregate{}))
   586  const sizeOfCountRowsAggregate = int64(unsafe.Sizeof(countRowsAggregate{}))
   587  const sizeOfMaxAggregate = int64(unsafe.Sizeof(maxAggregate{}))
   588  const sizeOfMinAggregate = int64(unsafe.Sizeof(minAggregate{}))
   589  const sizeOfSmallIntSumAggregate = int64(unsafe.Sizeof(smallIntSumAggregate{}))
   590  const sizeOfIntSumAggregate = int64(unsafe.Sizeof(intSumAggregate{}))
   591  const sizeOfDecimalSumAggregate = int64(unsafe.Sizeof(decimalSumAggregate{}))
   592  const sizeOfFloatSumAggregate = int64(unsafe.Sizeof(floatSumAggregate{}))
   593  const sizeOfIntervalSumAggregate = int64(unsafe.Sizeof(intervalSumAggregate{}))
   594  const sizeOfIntSqrDiffAggregate = int64(unsafe.Sizeof(intSqrDiffAggregate{}))
   595  const sizeOfFloatSqrDiffAggregate = int64(unsafe.Sizeof(floatSqrDiffAggregate{}))
   596  const sizeOfDecimalSqrDiffAggregate = int64(unsafe.Sizeof(decimalSqrDiffAggregate{}))
   597  const sizeOfFloatSumSqrDiffsAggregate = int64(unsafe.Sizeof(floatSumSqrDiffsAggregate{}))
   598  const sizeOfDecimalSumSqrDiffsAggregate = int64(unsafe.Sizeof(decimalSumSqrDiffsAggregate{}))
   599  const sizeOfFloatVarianceAggregate = int64(unsafe.Sizeof(floatVarianceAggregate{}))
   600  const sizeOfDecimalVarianceAggregate = int64(unsafe.Sizeof(decimalVarianceAggregate{}))
   601  const sizeOfFloatStdDevAggregate = int64(unsafe.Sizeof(floatStdDevAggregate{}))
   602  const sizeOfDecimalStdDevAggregate = int64(unsafe.Sizeof(decimalStdDevAggregate{}))
   603  const sizeOfAnyNotNullAggregate = int64(unsafe.Sizeof(anyNotNullAggregate{}))
   604  const sizeOfConcatAggregate = int64(unsafe.Sizeof(concatAggregate{}))
   605  const sizeOfBoolAndAggregate = int64(unsafe.Sizeof(boolAndAggregate{}))
   606  const sizeOfBoolOrAggregate = int64(unsafe.Sizeof(boolOrAggregate{}))
   607  const sizeOfBytesXorAggregate = int64(unsafe.Sizeof(bytesXorAggregate{}))
   608  const sizeOfIntXorAggregate = int64(unsafe.Sizeof(intXorAggregate{}))
   609  const sizeOfJSONAggregate = int64(unsafe.Sizeof(jsonAggregate{}))
   610  const sizeOfIntBitAndAggregate = int64(unsafe.Sizeof(intBitAndAggregate{}))
   611  const sizeOfBitBitAndAggregate = int64(unsafe.Sizeof(bitBitAndAggregate{}))
   612  const sizeOfIntBitOrAggregate = int64(unsafe.Sizeof(intBitOrAggregate{}))
   613  const sizeOfBitBitOrAggregate = int64(unsafe.Sizeof(bitBitOrAggregate{}))
   614  const sizeOfPercentileDiscAggregate = int64(unsafe.Sizeof(percentileDiscAggregate{}))
   615  const sizeOfPercentileContAggregate = int64(unsafe.Sizeof(percentileContAggregate{}))
   616  
   617  // singleDatumAggregateBase is a utility struct that helps aggregate builtins
   618  // that store a single datum internally track their memory usage related to
   619  // that single datum.
   620  // It will reuse tree.EvalContext.SingleDatumAggMemAccount when non-nil and will
   621  // *not* close that account upon its closure; if it is nil, then a new memory
   622  // account will be created specifically for this struct and that account will
   623  // be closed upon this struct's closure.
   624  type singleDatumAggregateBase struct {
   625  	mode singleDatumAggregateBaseMode
   626  	acc  *mon.BoundAccount
   627  	// accountedFor indicates how much memory (in bytes) has been registered
   628  	// with acc.
   629  	accountedFor int64
   630  }
   631  
   632  // singleDatumAggregateBaseMode indicates the mode in which
   633  // singleDatumAggregateBase operates with regards to resetting and closing
   634  // behaviors.
   635  type singleDatumAggregateBaseMode int
   636  
   637  const (
   638  	// sharedSingleDatumAggregateBaseMode is a mode in which the memory account
   639  	// will be grown and shrunk according to the corresponding aggregate builtin's
   640  	// memory usage, but the account will never be cleared or closed. In this
   641  	// mode, singleDatumAggregateBase is *not* responsible for closing the
   642  	// memory account.
   643  	sharedSingleDatumAggregateBaseMode singleDatumAggregateBaseMode = iota
   644  	// nonSharedSingleDatumAggregateBaseMode is a mode in which the memory
   645  	// account is "owned" by singleDatumAggregateBase, so the account can be
   646  	// cleared and closed by it. In fact, singleDatumAggregateBase is
   647  	// responsible for the account's closure.
   648  	nonSharedSingleDatumAggregateBaseMode
   649  )
   650  
   651  // makeSingleDatumAggregateBase makes a new singleDatumAggregateBase. If
   652  // evalCtx has a non-nil SingleDatumAggMemAccount field, then that memory account
   653  // will be used by the new struct, which will operate in "shared" mode; otherwise, a new account is created and owned by the struct ("non-shared" mode).
   654  func makeSingleDatumAggregateBase(evalCtx *tree.EvalContext) singleDatumAggregateBase {
   655  	if evalCtx.SingleDatumAggMemAccount == nil {
   656  		newAcc := evalCtx.Mon.MakeBoundAccount()
   657  		return singleDatumAggregateBase{
   658  			mode: nonSharedSingleDatumAggregateBaseMode,
   659  			acc:  &newAcc,
   660  		}
   661  	}
   662  	return singleDatumAggregateBase{
   663  		mode: sharedSingleDatumAggregateBaseMode,
   664  		acc:  evalCtx.SingleDatumAggMemAccount,
   665  	}
   666  }
   667  
   668  // updateMemoryUsage updates the memory account to reflect the new memory
   669  // usage. If any memory has been previously registered with this struct, then
   670  // the account is updated only by the delta between the previous and new usages;
   671  // otherwise, it is grown by newUsage.
   672  func (b *singleDatumAggregateBase) updateMemoryUsage(ctx context.Context, newUsage int64) error {
   673  	if err := b.acc.Grow(ctx, newUsage-b.accountedFor); err != nil {
   674  		return err
   675  	}
   676  	b.accountedFor = newUsage
   677  	return nil
   678  }
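
        // updateMemoryUsage above registers only the delta against the account, so
        // calling it repeatedly for the same datum never double-counts. A standalone
        // sketch of the same bookkeeping against a plain int64 counter follows
        // (illustrative names only; mon.BoundAccount is the real accounting type):
        type exampleDeltaAccount struct {
        	registered   int64 // bytes currently registered with the "account"
        	accountedFor int64 // bytes this aggregate has registered so far
        }

        // updateUsage grows (or shrinks) registered by the difference between the
        // new usage and what was previously accounted for. After updateUsage(10)
        // followed by updateUsage(25), registered is 25, not 35.
        func (a *exampleDeltaAccount) updateUsage(newUsage int64) {
        	a.registered += newUsage - a.accountedFor
        	a.accountedFor = newUsage
        }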
   679  
   680  func (b *singleDatumAggregateBase) reset(ctx context.Context) {
   681  	switch b.mode {
   682  	case sharedSingleDatumAggregateBaseMode:
   683  		b.acc.Shrink(ctx, b.accountedFor)
   684  		b.accountedFor = 0
   685  	case nonSharedSingleDatumAggregateBaseMode:
   686  		b.acc.Clear(ctx)
   687  	default:
   688  		panic(errors.Errorf("unexpected singleDatumAggregateBaseMode: %d", b.mode))
   689  	}
   690  }
   691  
   692  func (b *singleDatumAggregateBase) close(ctx context.Context) {
   693  	switch b.mode {
   694  	case sharedSingleDatumAggregateBaseMode:
   695  		b.acc.Shrink(ctx, b.accountedFor)
   696  		b.accountedFor = 0
   697  	case nonSharedSingleDatumAggregateBaseMode:
   698  		b.acc.Close(ctx)
   699  	default:
   700  		panic(errors.Errorf("unexpected singleDatumAggregateBaseMode: %d", b.mode))
   701  	}
   702  }
   703  
   704  // See NewAnyNotNullAggregate.
   705  type anyNotNullAggregate struct {
   706  	singleDatumAggregateBase
   707  
   708  	val tree.Datum
   709  }
   710  
   711  // NewAnyNotNullAggregate returns an aggregate function that returns an
   712  // arbitrary not-NULL value passed to Add (or NULL if no such value). This is
   713  // particularly useful for "passing through" values for columns which we know
   714  // are constant within any aggregation group (for example, the grouping columns
   715  // themselves).
   716  //
   717  // Note that NULL values do not affect the result of the aggregation; this is
   718  // important in a few different contexts:
   719  //
   720  //  - in distributed multi-stage aggregations, we can have a local stage with
   721  //    multiple (parallel) instances feeding into a final stage. If some of the
   722  //    instances see no rows, they emit a NULL into the final stage which needs
   723  //    to be ignored.
   724  //
   725  //  - for query optimization, when moving aggregations across left joins (which
   726  //    add NULL values).
   727  func NewAnyNotNullAggregate(evalCtx *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
   728  	return &anyNotNullAggregate{
   729  		singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx),
   730  		val:                      tree.DNull,
   731  	}
   732  }
   733  
   734  func newAnyNotNullAggregate(
   735  	_ []*types.T, evalCtx *tree.EvalContext, datums tree.Datums,
   736  ) tree.AggregateFunc {
   737  	return NewAnyNotNullAggregate(evalCtx, datums)
   738  }
   739  
   740  // Add records the first non-NULL datum passed to it; subsequent datums are ignored.
   741  func (a *anyNotNullAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
   742  	if a.val == tree.DNull && datum != tree.DNull {
   743  		a.val = datum
   744  		if err := a.updateMemoryUsage(ctx, int64(datum.Size())); err != nil {
   745  			return err
   746  		}
   747  	}
   748  	return nil
   749  }
   750  
   751  // Result returns the first non-NULL value passed to Add, or NULL if there was none.
   752  func (a *anyNotNullAggregate) Result() (tree.Datum, error) {
   753  	return a.val, nil
   754  }
   755  
   756  // Reset implements tree.AggregateFunc interface.
   757  func (a *anyNotNullAggregate) Reset(ctx context.Context) {
   758  	a.val = tree.DNull
   759  	a.reset(ctx)
   760  }
   761  
   762  // Close is part of the tree.AggregateFunc interface; it releases the memory account if this aggregate owns it.
   763  func (a *anyNotNullAggregate) Close(ctx context.Context) {
   764  	a.close(ctx)
   765  }
   766  
   767  // Size is part of the tree.AggregateFunc interface.
   768  func (a *anyNotNullAggregate) Size() int64 {
   769  	return sizeOfAnyNotNullAggregate
   770  }
   771  
   772  type arrayAggregate struct {
   773  	arr *tree.DArray
   774  	// Note that we do not embed singleDatumAggregateBase struct to help with
   775  	// memory accounting because arrayAggregate stores multiple datums inside
   776  	// of arr.
   777  	acc mon.BoundAccount
   778  }
   779  
   780  func newArrayAggregate(
   781  	params []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
   782  ) tree.AggregateFunc {
   783  	return &arrayAggregate{
   784  		arr: tree.NewDArray(params[0]),
   785  		acc: evalCtx.Mon.MakeBoundAccount(),
   786  	}
   787  }
   788  
   789  // Add accumulates the passed datum into the array.
   790  func (a *arrayAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
   791  	if err := a.acc.Grow(ctx, int64(datum.Size())); err != nil {
   792  		return err
   793  	}
   794  	return a.arr.Append(datum)
   795  }
   796  
   797  // Result returns a copy of the array of all datums passed to Add.
   798  func (a *arrayAggregate) Result() (tree.Datum, error) {
   799  	if len(a.arr.Array) > 0 {
   800  		arrCopy := *a.arr
   801  		return &arrCopy, nil
   802  	}
   803  	return tree.DNull, nil
   804  }
   805  
   806  // Reset implements tree.AggregateFunc interface.
   807  func (a *arrayAggregate) Reset(ctx context.Context) {
   808  	a.arr = tree.NewDArray(a.arr.ParamTyp)
   809  	a.acc.Empty(ctx)
   810  }
   811  
   812  // Close allows the aggregate to release the memory it requested during
   813  // operation.
   814  func (a *arrayAggregate) Close(ctx context.Context) {
   815  	a.acc.Close(ctx)
   816  }
   817  
   818  // Size is part of the tree.AggregateFunc interface.
   819  func (a *arrayAggregate) Size() int64 {
   820  	return sizeOfArrayAggregate
   821  }
   822  
   823  type avgAggregate struct {
   824  	agg   tree.AggregateFunc
   825  	count int
   826  }
   827  
   828  func newIntAvgAggregate(
   829  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
   830  ) tree.AggregateFunc {
   831  	return &avgAggregate{agg: newIntSumAggregate(params, evalCtx, arguments)}
   832  }
   833  func newFloatAvgAggregate(
   834  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
   835  ) tree.AggregateFunc {
   836  	return &avgAggregate{agg: newFloatSumAggregate(params, evalCtx, arguments)}
   837  }
   838  func newDecimalAvgAggregate(
   839  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
   840  ) tree.AggregateFunc {
   841  	return &avgAggregate{agg: newDecimalSumAggregate(params, evalCtx, arguments)}
   842  }
   843  func newIntervalAvgAggregate(
   844  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
   845  ) tree.AggregateFunc {
   846  	return &avgAggregate{agg: newIntervalSumAggregate(params, evalCtx, arguments)}
   847  }
   848  
   849  // Add accumulates the passed datum into the average.
   850  func (a *avgAggregate) Add(ctx context.Context, datum tree.Datum, other ...tree.Datum) error {
   851  	if datum == tree.DNull {
   852  		return nil
   853  	}
   854  	if err := a.agg.Add(ctx, datum); err != nil {
   855  		return err
   856  	}
   857  	a.count++
   858  	return nil
   859  }
   860  
   861  // Result returns the average of all datums passed to Add.
   862  func (a *avgAggregate) Result() (tree.Datum, error) {
   863  	sum, err := a.agg.Result()
   864  	if err != nil {
   865  		return nil, err
   866  	}
   867  	if sum == tree.DNull {
   868  		return sum, nil
   869  	}
   870  	switch t := sum.(type) {
   871  	case *tree.DFloat:
   872  		return tree.NewDFloat(*t / tree.DFloat(a.count)), nil
   873  	case *tree.DDecimal:
   874  		count := apd.New(int64(a.count), 0)
   875  		_, err := tree.DecimalCtx.Quo(&t.Decimal, &t.Decimal, count)
   876  		return t, err
   877  	case *tree.DInterval:
   878  		return &tree.DInterval{Duration: t.Duration.Div(int64(a.count))}, nil
   879  	default:
   880  		return nil, errors.AssertionFailedf("unexpected SUM result type: %s", t)
   881  	}
   882  }
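
        // Result above divides the running decimal sum by the row count via
        // tree.DecimalCtx. Below is a standalone sketch of the same division using
        // the apd package directly; the precision of 25 is illustrative rather than
        // the value tree.DecimalCtx is configured with. For example,
        // exampleDecimalAvg(apd.New(7, 0), 2) yields 3.5.
        func exampleDecimalAvg(sum *apd.Decimal, count int64) (*apd.Decimal, error) {
        	ctx := apd.BaseContext.WithPrecision(25)
        	res := new(apd.Decimal)
        	// Quo stores sum/count into res, rounded to the context's precision.
        	_, err := ctx.Quo(res, sum, apd.New(count, 0))
        	return res, err
        }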
   883  
   884  // Reset implements tree.AggregateFunc interface.
   885  func (a *avgAggregate) Reset(ctx context.Context) {
   886  	a.agg.Reset(ctx)
   887  	a.count = 0
   888  }
   889  
   890  // Close is part of the tree.AggregateFunc interface.
   891  func (a *avgAggregate) Close(ctx context.Context) {
   892  	a.agg.Close(ctx)
   893  }
   894  
   895  // Size is part of the tree.AggregateFunc interface.
   896  func (a *avgAggregate) Size() int64 {
   897  	return sizeOfAvgAggregate
   898  }
   899  
   900  type concatAggregate struct {
   901  	singleDatumAggregateBase
   902  
   903  	forBytes   bool
   904  	sawNonNull bool
   905  	delimiter  string // used for non-window functions
   906  	result     bytes.Buffer
   907  }
   908  
   909  func newBytesConcatAggregate(
   910  	_ []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
   911  ) tree.AggregateFunc {
   912  	concatAgg := &concatAggregate{
   913  		singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx),
   914  		forBytes:                 true,
   915  	}
   916  	if len(arguments) == 1 && arguments[0] != tree.DNull {
   917  		concatAgg.delimiter = string(tree.MustBeDBytes(arguments[0]))
   918  	} else if len(arguments) > 1 {
   919  		panic(fmt.Sprintf("too many arguments passed in, expected < 2, got %d", len(arguments)))
   920  	}
   921  	return concatAgg
   922  }
   923  
   924  func newStringConcatAggregate(
   925  	_ []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
   926  ) tree.AggregateFunc {
   927  	concatAgg := &concatAggregate{
   928  		singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx),
   929  	}
   930  	if len(arguments) == 1 && arguments[0] != tree.DNull {
   931  		concatAgg.delimiter = string(tree.MustBeDString(arguments[0]))
   932  	} else if len(arguments) > 1 {
   933  		panic(fmt.Sprintf("too many arguments passed in, expected < 2, got %d", len(arguments)))
   934  	}
   935  	return concatAgg
   936  }
   937  
   938  func (a *concatAggregate) Add(ctx context.Context, datum tree.Datum, others ...tree.Datum) error {
   939  	if datum == tree.DNull {
   940  		return nil
   941  	}
   942  	if !a.sawNonNull {
   943  		a.sawNonNull = true
   944  	} else {
   945  		delimiter := a.delimiter
   946  		// If this is called as part of a window function, the delimiter is passed in
   947  		// via the first element in others.
   948  		if len(others) == 1 && others[0] != tree.DNull {
   949  			if a.forBytes {
   950  				delimiter = string(tree.MustBeDBytes(others[0]))
   951  			} else {
   952  				delimiter = string(tree.MustBeDString(others[0]))
   953  			}
   954  		} else if len(others) > 1 {
   955  			panic(fmt.Sprintf("too many other datums passed in, expected < 2, got %d", len(others)))
   956  		}
   957  		if len(delimiter) > 0 {
   958  			a.result.WriteString(delimiter)
   959  		}
   960  	}
   961  	var arg string
   962  	if a.forBytes {
   963  		arg = string(tree.MustBeDBytes(datum))
   964  	} else {
   965  		arg = string(tree.MustBeDString(datum))
   966  	}
   967  	a.result.WriteString(arg)
   968  	if err := a.updateMemoryUsage(ctx, int64(a.result.Cap())); err != nil {
   969  		return err
   970  	}
   971  	return nil
   972  }
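
        // Add above writes the delimiter before every element except the first,
        // which is the usual way to build a delimited concatenation incrementally.
        // The same pattern in a standalone form (exampleDelimitedConcat is
        // illustrative only):
        func exampleDelimitedConcat(parts []string, delimiter string) string {
        	var buf bytes.Buffer
        	for i, p := range parts {
        		if i > 0 {
        			buf.WriteString(delimiter)
        		}
        		buf.WriteString(p)
        	}
        	return buf.String()
        }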
   973  
   974  func (a *concatAggregate) Result() (tree.Datum, error) {
   975  	if !a.sawNonNull {
   976  		return tree.DNull, nil
   977  	}
   978  	if a.forBytes {
   979  		res := tree.DBytes(a.result.String())
   980  		return &res, nil
   981  	}
   982  	res := tree.DString(a.result.String())
   983  	return &res, nil
   984  }
   985  
   986  // Reset implements tree.AggregateFunc interface.
   987  func (a *concatAggregate) Reset(ctx context.Context) {
   988  	a.sawNonNull = false
   989  	a.result.Reset()
   990  	// Note that a.result.Reset() does *not* release already allocated memory,
   991  	// so we do not reset singleDatumAggregateBase.
   992  }
   993  
   994  // Close allows the aggregate to release the memory it requested during
   995  // operation.
   996  func (a *concatAggregate) Close(ctx context.Context) {
   997  	a.close(ctx)
   998  }
   999  
  1000  // Size is part of the tree.AggregateFunc interface.
  1001  func (a *concatAggregate) Size() int64 {
  1002  	return sizeOfConcatAggregate
  1003  }
  1004  
  1005  type intBitAndAggregate struct {
  1006  	sawNonNull bool
  1007  	result     int64
  1008  }
  1009  
  1010  func newIntBitAndAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1011  	return &intBitAndAggregate{}
  1012  }
  1013  
  1014  // Add inserts one value into the running bitwise AND.
  1015  func (a *intBitAndAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1016  	if datum == tree.DNull {
  1017  		return nil
  1018  	}
  1019  	if !a.sawNonNull {
  1020  		// This is the first non-null datum, so we simply store
  1021  		// the provided value for the aggregation.
  1022  		a.result = int64(tree.MustBeDInt(datum))
  1023  		a.sawNonNull = true
  1024  		return nil
  1025  	}
  1026  	// This is not the first non-null datum, so we actually AND it with the
  1027  	// aggregate so far.
  1028  	a.result = a.result & int64(tree.MustBeDInt(datum))
  1029  	return nil
  1030  }
  1031  
  1032  // Result returns the bitwise AND.
  1033  func (a *intBitAndAggregate) Result() (tree.Datum, error) {
  1034  	if !a.sawNonNull {
  1035  		return tree.DNull, nil
  1036  	}
  1037  	return tree.NewDInt(tree.DInt(a.result)), nil
  1038  }
  1039  
  1040  // Reset implements tree.AggregateFunc interface.
  1041  func (a *intBitAndAggregate) Reset(context.Context) {
  1042  	a.sawNonNull = false
  1043  	a.result = 0
  1044  }
  1045  
  1046  // Close is part of the tree.AggregateFunc interface.
  1047  func (a *intBitAndAggregate) Close(context.Context) {}
  1048  
  1049  // Size is part of the tree.AggregateFunc interface.
  1050  func (a *intBitAndAggregate) Size() int64 {
  1051  	return sizeOfIntBitAndAggregate
  1052  }
  1053  
  1054  type bitBitAndAggregate struct {
  1055  	sawNonNull bool
  1056  	result     bitarray.BitArray
  1057  }
  1058  
  1059  func newBitBitAndAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1060  	return &bitBitAndAggregate{}
  1061  }
  1062  
  1063  // Add inserts one value into the running bitwise AND.
  1064  func (a *bitBitAndAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1065  	if datum == tree.DNull {
  1066  		return nil
  1067  	}
  1068  	bits := &tree.MustBeDBitArray(datum).BitArray
  1069  	if !a.sawNonNull {
  1070  		// This is the first non-null datum, so we simply store
  1071  		// the provided value for the aggregation.
  1072  		a.result = *bits
  1073  		a.sawNonNull = true
  1074  		return nil
  1075  	}
  1076  	// If the length of the current bit array is different from that of the
  1077  	// stored value, we return an error.
  1078  	if a.result.BitLen() != bits.BitLen() {
  1079  		return tree.NewCannotMixBitArraySizesError("AND")
  1080  	}
  1081  	// This is not the first non-null datum, so we actually AND it with the
  1082  	// aggregate so far.
  1083  	a.result = bitarray.And(a.result, *bits)
  1084  	return nil
  1085  }
  1086  
  1087  // Result returns the bitwise AND.
  1088  func (a *bitBitAndAggregate) Result() (tree.Datum, error) {
  1089  	if !a.sawNonNull {
  1090  		return tree.DNull, nil
  1091  	}
  1092  	return &tree.DBitArray{BitArray: a.result}, nil
  1093  }
  1094  
  1095  // Reset implements tree.AggregateFunc interface.
  1096  func (a *bitBitAndAggregate) Reset(context.Context) {
  1097  	a.sawNonNull = false
  1098  	a.result = bitarray.BitArray{}
  1099  }
  1100  
  1101  // Close is part of the tree.AggregateFunc interface.
  1102  func (a *bitBitAndAggregate) Close(context.Context) {}
  1103  
  1104  // Size is part of the tree.AggregateFunc interface.
  1105  func (a *bitBitAndAggregate) Size() int64 {
  1106  	return sizeOfBitBitAndAggregate
  1107  }
  1108  
  1109  type intBitOrAggregate struct {
  1110  	sawNonNull bool
  1111  	result     int64
  1112  }
  1113  
  1114  func newIntBitOrAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1115  	return &intBitOrAggregate{}
  1116  }
  1117  
  1118  // Add inserts one value into the running bitwise OR.
  1119  func (a *intBitOrAggregate) Add(
  1120  	_ context.Context, datum tree.Datum, otherArgs ...tree.Datum,
  1121  ) error {
  1122  	if datum == tree.DNull {
  1123  		return nil
  1124  	}
  1125  	if !a.sawNonNull {
  1126  		// This is the first non-null datum, so we simply store
  1127  		// the provided value for the aggregation.
  1128  		a.result = int64(tree.MustBeDInt(datum))
  1129  		a.sawNonNull = true
  1130  		return nil
  1131  	}
  1132  	// This is not the first non-null datum, so we actually OR it with the
  1133  	// aggregate so far.
  1134  	a.result = a.result | int64(tree.MustBeDInt(datum))
  1135  	return nil
  1136  }
  1137  
  1138  // Result returns the bitwise OR.
  1139  func (a *intBitOrAggregate) Result() (tree.Datum, error) {
  1140  	if !a.sawNonNull {
  1141  		return tree.DNull, nil
  1142  	}
  1143  	return tree.NewDInt(tree.DInt(a.result)), nil
  1144  }
  1145  
  1146  // Reset implements tree.AggregateFunc interface.
  1147  func (a *intBitOrAggregate) Reset(context.Context) {
  1148  	a.sawNonNull = false
  1149  	a.result = 0
  1150  }
  1151  
  1152  // Close is part of the tree.AggregateFunc interface.
  1153  func (a *intBitOrAggregate) Close(context.Context) {}
  1154  
  1155  // Size is part of the tree.AggregateFunc interface.
  1156  func (a *intBitOrAggregate) Size() int64 {
  1157  	return sizeOfIntBitOrAggregate
  1158  }
  1159  
  1160  type bitBitOrAggregate struct {
  1161  	sawNonNull bool
  1162  	result     bitarray.BitArray
  1163  }
  1164  
  1165  func newBitBitOrAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1166  	return &bitBitOrAggregate{}
  1167  }
  1168  
  1169  // Add inserts one value into the running bitwise OR.
  1170  func (a *bitBitOrAggregate) Add(
  1171  	_ context.Context, datum tree.Datum, otherArgs ...tree.Datum,
  1172  ) error {
  1173  	if datum == tree.DNull {
  1174  		return nil
  1175  	}
  1176  	bits := &tree.MustBeDBitArray(datum).BitArray
  1177  	if !a.sawNonNull {
  1178  		// This is the first non-null datum, so we simply store
  1179  		// the provided value for the aggregation.
  1180  		a.result = *bits
  1181  		a.sawNonNull = true
  1182  		return nil
  1183  	}
  1184  	// If the length of the current bit array is different from that of the
  1185  	// stored value, we return an error.
  1186  	if a.result.BitLen() != bits.BitLen() {
  1187  		return tree.NewCannotMixBitArraySizesError("OR")
  1188  	}
  1189  	// This is not the first non-null datum, so we actually OR it with the
  1190  	// aggregate so far.
  1191  	a.result = bitarray.Or(a.result, *bits)
  1192  	return nil
  1193  }
  1194  
  1195  // Result returns the bitwise OR.
  1196  func (a *bitBitOrAggregate) Result() (tree.Datum, error) {
  1197  	if !a.sawNonNull {
  1198  		return tree.DNull, nil
  1199  	}
  1200  	return &tree.DBitArray{BitArray: a.result}, nil
  1201  }
  1202  
  1203  // Reset implements tree.AggregateFunc interface.
  1204  func (a *bitBitOrAggregate) Reset(context.Context) {
  1205  	a.sawNonNull = false
  1206  	a.result = bitarray.BitArray{}
  1207  }
  1208  
  1209  // Close is part of the tree.AggregateFunc interface.
  1210  func (a *bitBitOrAggregate) Close(context.Context) {}
  1211  
  1212  // Size is part of the tree.AggregateFunc interface.
  1213  func (a *bitBitOrAggregate) Size() int64 {
  1214  	return sizeOfBitBitOrAggregate
  1215  }
  1216  
  1217  type boolAndAggregate struct {
  1218  	sawNonNull bool
  1219  	result     bool
  1220  }
  1221  
  1222  func newBoolAndAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1223  	return &boolAndAggregate{}
  1224  }
  1225  
  1226  func (a *boolAndAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1227  	if datum == tree.DNull {
  1228  		return nil
  1229  	}
  1230  	if !a.sawNonNull {
  1231  		a.sawNonNull = true
  1232  		a.result = true
  1233  	}
  1234  	a.result = a.result && bool(*datum.(*tree.DBool))
  1235  	return nil
  1236  }
  1237  
  1238  func (a *boolAndAggregate) Result() (tree.Datum, error) {
  1239  	if !a.sawNonNull {
  1240  		return tree.DNull, nil
  1241  	}
  1242  	return tree.MakeDBool(tree.DBool(a.result)), nil
  1243  }
  1244  
  1245  // Reset implements tree.AggregateFunc interface.
  1246  func (a *boolAndAggregate) Reset(context.Context) {
  1247  	a.sawNonNull = false
  1248  	a.result = false
  1249  }
  1250  
  1251  // Close is part of the tree.AggregateFunc interface.
  1252  func (a *boolAndAggregate) Close(context.Context) {}
  1253  
  1254  // Size is part of the tree.AggregateFunc interface.
  1255  func (a *boolAndAggregate) Size() int64 {
  1256  	return sizeOfBoolAndAggregate
  1257  }
  1258  
  1259  type boolOrAggregate struct {
  1260  	sawNonNull bool
  1261  	result     bool
  1262  }
  1263  
  1264  func newBoolOrAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1265  	return &boolOrAggregate{}
  1266  }
  1267  
  1268  func (a *boolOrAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1269  	if datum == tree.DNull {
  1270  		return nil
  1271  	}
  1272  	a.sawNonNull = true
  1273  	a.result = a.result || bool(*datum.(*tree.DBool))
  1274  	return nil
  1275  }
  1276  
  1277  func (a *boolOrAggregate) Result() (tree.Datum, error) {
  1278  	if !a.sawNonNull {
  1279  		return tree.DNull, nil
  1280  	}
  1281  	return tree.MakeDBool(tree.DBool(a.result)), nil
  1282  }
  1283  
  1284  // Reset implements tree.AggregateFunc interface.
  1285  func (a *boolOrAggregate) Reset(context.Context) {
  1286  	a.sawNonNull = false
  1287  	a.result = false
  1288  }
  1289  
  1290  // Close is part of the tree.AggregateFunc interface.
  1291  func (a *boolOrAggregate) Close(context.Context) {}
  1292  
  1293  // Size is part of the tree.AggregateFunc interface.
  1294  func (a *boolOrAggregate) Size() int64 {
  1295  	return sizeOfBoolOrAggregate
  1296  }
  1297  
  1298  // corrAggregate represents the SQL:2003 correlation coefficient. Let:
  1299  //
  1300  // n   be the count of rows.
  1301  // sx  be the sum of the column of values of <independent variable expression>
  1302  // sx2 be the sum of the squares of values in the <independent variable expression> column
  1303  // sy  be the sum of the column of values of <dependent variable expression>
  1304  // sy2 be the sum of the squares of values in the <dependent variable expression> column
  1305  // sxy be the sum of the row-wise products of the value in the <independent variable expression>
  1306  //     column times the value in the <dependent variable expression> column.
  1307  //
  1308  // result:
  1309  //   1) If n*sx2 equals sx*sx, then the result is the null value.
  1310  //   2) If n*sy2 equals sy*sy, then the result is the null value.
  1311  //   3) Otherwise, the result is SQRT(POWER(n*sxy-sx*sy,2) / ((n*sx2-sx*sx)*(n*sy2-sy*sy))).
  1312  //      If the exponent of the approximate mathematical result of the operation is not within
  1313  //      the implementation-defined exponent range for the result data type, then the result
  1314  //      is the null value.
  1315  type corrAggregate struct {
  1316  	n   int
  1317  	sx  float64
  1318  	sx2 float64
  1319  	sy  float64
  1320  	sy2 float64
  1321  	sxy float64
  1322  }
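
        // For intuition, a minimal sketch (not part of the builtin) of the same
        // computation over plain float64 slices; the aggregate above accumulates the
        // identical sums incrementally via Add:
        //
        //   xs := []float64{1, 2, 3}
        //   ys := []float64{2, 4, 6}
        //   var n, sx, sy, sx2, sy2, sxy float64
        //   for i := range xs {
        //   	n++
        //   	sx += xs[i]
        //   	sy += ys[i]
        //   	sx2 += xs[i] * xs[i]
        //   	sy2 += ys[i] * ys[i]
        //   	sxy += xs[i] * ys[i]
        //   }
        //   corr := (n*sxy - sx*sy) / math.Sqrt((n*sx2-sx*sx)*(n*sy2-sy*sy)) // 1 for this data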
  1323  
  1324  func newCorrAggregate([]*types.T, *tree.EvalContext, tree.Datums) tree.AggregateFunc {
  1325  	return &corrAggregate{}
  1326  }
  1327  
  1328  // Add implements tree.AggregateFunc interface.
  1329  func (a *corrAggregate) Add(_ context.Context, datumY tree.Datum, otherArgs ...tree.Datum) error {
  1330  	if datumY == tree.DNull {
  1331  		return nil
  1332  	}
  1333  
  1334  	datumX := otherArgs[0]
  1335  	if datumX == tree.DNull {
  1336  		return nil
  1337  	}
  1338  
  1339  	x, err := a.float64Val(datumX)
  1340  	if err != nil {
  1341  		return err
  1342  	}
  1343  
  1344  	y, err := a.float64Val(datumY)
  1345  	if err != nil {
  1346  		return err
  1347  	}
  1348  
  1349  	a.n++
  1350  	a.sx += x
  1351  	a.sy += y
  1352  	a.sx2 += x * x
  1353  	a.sy2 += y * y
  1354  	a.sxy += x * y
  1355  
  1356  	if math.IsInf(a.sx, 0) ||
  1357  		math.IsInf(a.sx2, 0) ||
  1358  		math.IsInf(a.sy, 0) ||
  1359  		math.IsInf(a.sy2, 0) ||
  1360  		math.IsInf(a.sxy, 0) {
  1361  		return tree.ErrFloatOutOfRange
  1362  	}
  1363  
  1364  	return nil
  1365  }
  1366  
  1367  // Result implements tree.AggregateFunc interface.
  1368  func (a *corrAggregate) Result() (tree.Datum, error) {
  1369  	if a.n < 1 {
  1370  		return tree.DNull, nil
  1371  	}
  1372  
  1373  	if a.sx2 == 0 || a.sy2 == 0 {
  1374  		return tree.DNull, nil
  1375  	}
  1376  
  1377  	floatN := float64(a.n)
  1378  
  1379  	numeratorX := floatN*a.sx2 - a.sx*a.sx
  1380  	if math.IsInf(numeratorX, 0) {
  1381  		return tree.DNull, pgerror.New(pgcode.NumericValueOutOfRange, "float out of range")
  1382  	}
  1383  
  1384  	numeratorY := floatN*a.sy2 - a.sy*a.sy
  1385  	if math.IsInf(numeratorY, 0) {
  1386  		return tree.DNull, pgerror.New(pgcode.NumericValueOutOfRange, "float out of range")
  1387  	}
  1388  
  1389  	numeratorXY := floatN*a.sxy - a.sx*a.sy
  1390  	if math.IsInf(numeratorXY, 0) {
  1391  		return tree.DNull, pgerror.New(pgcode.NumericValueOutOfRange, "float out of range")
  1392  	}
  1393  
  1394  	if numeratorX <= 0 || numeratorY <= 0 {
  1395  		return tree.DNull, nil
  1396  	}
  1397  
  1398  	return tree.NewDFloat(tree.DFloat(numeratorXY / math.Sqrt(numeratorX*numeratorY))), nil
  1399  }
  1400  
  1401  // Reset implements tree.AggregateFunc interface.
  1402  func (a *corrAggregate) Reset(context.Context) {
  1403  	a.n = 0
  1404  	a.sx = 0
  1405  	a.sx2 = 0
  1406  	a.sy = 0
  1407  	a.sy2 = 0
  1408  	a.sxy = 0
  1409  }
  1410  
  1411  // Close implements tree.AggregateFunc interface.
  1412  func (a *corrAggregate) Close(context.Context) {}
  1413  
  1414  // Size implements tree.AggregateFunc interface.
  1415  func (a *corrAggregate) Size() int64 {
  1416  	return sizeOfCorrAggregate
  1417  }
  1418  
  1419  func (a *corrAggregate) float64Val(datum tree.Datum) (float64, error) {
  1420  	switch val := datum.(type) {
  1421  	case *tree.DFloat:
  1422  		return float64(*val), nil
  1423  	case *tree.DInt:
  1424  		return float64(*val), nil
  1425  	default:
  1426  		return 0, fmt.Errorf("invalid type %v", val)
  1427  	}
  1428  }
  1429  
  1430  type countAggregate struct {
  1431  	count int
  1432  }
  1433  
  1434  func newCountAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1435  	return &countAggregate{}
  1436  }
  1437  
  1438  func (a *countAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1439  	if datum == tree.DNull {
  1440  		return nil
  1441  	}
  1442  	a.count++
  1443  	return nil
  1444  }
  1445  
  1446  func (a *countAggregate) Result() (tree.Datum, error) {
  1447  	return tree.NewDInt(tree.DInt(a.count)), nil
  1448  }
  1449  
  1450  // Reset implements tree.AggregateFunc interface.
  1451  func (a *countAggregate) Reset(context.Context) {
  1452  	a.count = 0
  1453  }
  1454  
  1455  // Close is part of the tree.AggregateFunc interface.
  1456  func (a *countAggregate) Close(context.Context) {}
  1457  
  1458  // Size is part of the tree.AggregateFunc interface.
  1459  func (a *countAggregate) Size() int64 {
  1460  	return sizeOfCountAggregate
  1461  }
  1462  
  1463  type countRowsAggregate struct {
  1464  	count int
  1465  }
  1466  
  1467  func newCountRowsAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1468  	return &countRowsAggregate{}
  1469  }
  1470  
  1471  func (a *countRowsAggregate) Add(_ context.Context, _ tree.Datum, _ ...tree.Datum) error {
  1472  	a.count++
  1473  	return nil
  1474  }
  1475  
  1476  func (a *countRowsAggregate) Result() (tree.Datum, error) {
  1477  	return tree.NewDInt(tree.DInt(a.count)), nil
  1478  }
  1479  
  1480  // Reset implements tree.AggregateFunc interface.
  1481  func (a *countRowsAggregate) Reset(context.Context) {
  1482  	a.count = 0
  1483  }
  1484  
  1485  // Close is part of the tree.AggregateFunc interface.
  1486  func (a *countRowsAggregate) Close(context.Context) {}
  1487  
  1488  // Size is part of the tree.AggregateFunc interface.
  1489  func (a *countRowsAggregate) Size() int64 {
  1490  	return sizeOfCountRowsAggregate
  1491  }
  1492  
  1493  // maxAggregate keeps track of the largest value passed to Add.
  1494  type maxAggregate struct {
  1495  	singleDatumAggregateBase
  1496  
  1497  	max               tree.Datum
  1498  	evalCtx           *tree.EvalContext
  1499  	variableDatumSize bool
  1500  }
  1501  
  1502  func newMaxAggregate(
  1503  	params []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  1504  ) tree.AggregateFunc {
  1505  	_, variable := tree.DatumTypeSize(params[0])
  1506  	// If the datum type has a variable size, the memory account will be
  1507  	// updated accordingly on every change to the current "max" value, but if
  1508  	// it has a fixed size, the memory account will be updated only on the
  1509  	// first non-null datum.
  1510  	return &maxAggregate{
  1511  		singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx),
  1512  		evalCtx:                  evalCtx,
  1513  		variableDatumSize:        variable,
  1514  	}
  1515  }
  1516  
  1517  // Add sets the max to the larger of the current max or the passed datum.
  1518  func (a *maxAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1519  	if datum == tree.DNull {
  1520  		return nil
  1521  	}
  1522  	if a.max == nil {
  1523  		if err := a.updateMemoryUsage(ctx, int64(datum.Size())); err != nil {
  1524  			return err
  1525  		}
  1526  		a.max = datum
  1527  		return nil
  1528  	}
  1529  	c := a.max.Compare(a.evalCtx, datum)
  1530  	if c < 0 {
  1531  		a.max = datum
  1532  		if a.variableDatumSize {
  1533  			if err := a.updateMemoryUsage(ctx, int64(datum.Size())); err != nil {
  1534  				return err
  1535  			}
  1536  		}
  1537  	}
  1538  	return nil
  1539  }
  1540  
  1541  // Result returns the largest value passed to Add.
  1542  func (a *maxAggregate) Result() (tree.Datum, error) {
  1543  	if a.max == nil {
  1544  		return tree.DNull, nil
  1545  	}
  1546  	return a.max, nil
  1547  }
  1548  
  1549  // Reset implements tree.AggregateFunc interface.
  1550  func (a *maxAggregate) Reset(ctx context.Context) {
  1551  	a.max = nil
  1552  	a.reset(ctx)
  1553  }
  1554  
  1555  // Close is part of the tree.AggregateFunc interface.
  1556  func (a *maxAggregate) Close(ctx context.Context) {
  1557  	a.close(ctx)
  1558  }
  1559  
  1560  // Size is part of the tree.AggregateFunc interface.
  1561  func (a *maxAggregate) Size() int64 {
  1562  	return sizeOfMaxAggregate
  1563  }
  1564  
  1565  // minAggregate keeps track of the smallest value passed to Add.
  1566  type minAggregate struct {
  1567  	singleDatumAggregateBase
  1568  
  1569  	min               tree.Datum
  1570  	evalCtx           *tree.EvalContext
  1571  	variableDatumSize bool
  1572  }
  1573  
  1574  func newMinAggregate(
  1575  	params []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  1576  ) tree.AggregateFunc {
  1577  	_, variable := tree.DatumTypeSize(params[0])
  1578  	// If the datum type has a variable size, the memory account will be
  1579  	// updated accordingly on every change to the current "min" value, but if
  1580  	// it has a fixed size, the memory account will be updated only on the
  1581  	// first non-null datum.
  1582  	return &minAggregate{
  1583  		singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx),
  1584  		evalCtx:                  evalCtx,
  1585  		variableDatumSize:        variable,
  1586  	}
  1587  }
  1588  
  1589  // Add sets the min to the smaller of the current min or the passed datum.
  1590  func (a *minAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1591  	if datum == tree.DNull {
  1592  		return nil
  1593  	}
  1594  	if a.min == nil {
  1595  		if err := a.updateMemoryUsage(ctx, int64(datum.Size())); err != nil {
  1596  			return err
  1597  		}
  1598  		a.min = datum
  1599  		return nil
  1600  	}
  1601  	c := a.min.Compare(a.evalCtx, datum)
  1602  	if c > 0 {
  1603  		a.min = datum
  1604  		if a.variableDatumSize {
  1605  			if err := a.updateMemoryUsage(ctx, int64(datum.Size())); err != nil {
  1606  				return err
  1607  			}
  1608  		}
  1609  	}
  1610  	return nil
  1611  }
  1612  
  1613  // Result returns the smallest value passed to Add.
  1614  func (a *minAggregate) Result() (tree.Datum, error) {
  1615  	if a.min == nil {
  1616  		return tree.DNull, nil
  1617  	}
  1618  	return a.min, nil
  1619  }
  1620  
  1621  // Reset implements tree.AggregateFunc interface.
  1622  func (a *minAggregate) Reset(ctx context.Context) {
  1623  	a.min = nil
  1624  	a.reset(ctx)
  1625  }
  1626  
  1627  // Close is part of the tree.AggregateFunc interface.
  1628  func (a *minAggregate) Close(ctx context.Context) {
  1629  	a.close(ctx)
  1630  }
  1631  
  1632  // Size is part of the tree.AggregateFunc interface.
  1633  func (a *minAggregate) Size() int64 {
  1634  	return sizeOfMinAggregate
  1635  }
  1636  
  1637  type smallIntSumAggregate struct {
  1638  	sum         int64
  1639  	seenNonNull bool
  1640  }
  1641  
  1642  func newSmallIntSumAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1643  	return &smallIntSumAggregate{}
  1644  }
  1645  
  1646  // Add adds the value of the passed datum to the sum.
  1647  func (a *smallIntSumAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1648  	if datum == tree.DNull {
  1649  		return nil
  1650  	}
  1651  
  1652  	var ok bool
  1653  	a.sum, ok = arith.AddWithOverflow(a.sum, int64(tree.MustBeDInt(datum)))
  1654  	if !ok {
  1655  		return tree.ErrIntOutOfRange
  1656  	}
  1657  	a.seenNonNull = true
  1658  	return nil
  1659  }
  1660  
  1661  // Result returns the sum.
  1662  func (a *smallIntSumAggregate) Result() (tree.Datum, error) {
  1663  	if !a.seenNonNull {
  1664  		return tree.DNull, nil
  1665  	}
  1666  	return tree.NewDInt(tree.DInt(a.sum)), nil
  1667  }
  1668  
  1669  // Reset implements tree.AggregateFunc interface.
  1670  func (a *smallIntSumAggregate) Reset(context.Context) {
  1671  	a.sum = 0
  1672  	a.seenNonNull = false
  1673  }
  1674  
  1675  // Close is part of the tree.AggregateFunc interface.
  1676  func (a *smallIntSumAggregate) Close(context.Context) {}
  1677  
  1678  // Size is part of the tree.AggregateFunc interface.
  1679  func (a *smallIntSumAggregate) Size() int64 {
  1680  	return sizeOfSmallIntSumAggregate
  1681  }
  1682  
  1683  type intSumAggregate struct {
  1684  	singleDatumAggregateBase
  1685  
  1686  	// Either the `intSum` or the `decSum` field contains the
  1687  	// result. Which one is used is determined by the `large` field
  1688  	// below.
  1689  	intSum      int64
  1690  	decSum      apd.Decimal
  1691  	tmpDec      apd.Decimal
  1692  	large       bool
  1693  	seenNonNull bool
  1694  }
  1695  
  1696  func newIntSumAggregate(_ []*types.T, evalCtx *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1697  	return &intSumAggregate{singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx)}
  1698  }
  1699  
  1700  // Add adds the value of the passed datum to the sum.
  1701  func (a *intSumAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1702  	if datum == tree.DNull {
  1703  		return nil
  1704  	}
  1705  
  1706  	t := int64(tree.MustBeDInt(datum))
  1707  	if t != 0 {
  1708  		// The sum can be computed using a single int64 as long as the
  1709  		// result of the addition does not overflow.  However since Go
  1710  		// does not provide checked addition, we have to check for the
  1711  		// overflow explicitly.
  1712  		if !a.large {
  1713  			r, ok := arith.AddWithOverflow(a.intSum, t)
  1714  			if ok {
  1715  				a.intSum = r
  1716  			} else {
  1717  				// An overflow was detected; switch to large integers, but keep the
  1718  				// sum computed so far.
  1719  				a.large = true
  1720  				a.decSum.SetFinite(a.intSum, 0)
  1721  			}
  1722  		}
  1723  
  1724  		if a.large {
  1725  			a.tmpDec.SetFinite(t, 0)
  1726  			_, err := tree.ExactCtx.Add(&a.decSum, &a.decSum, &a.tmpDec)
  1727  			if err != nil {
  1728  				return err
  1729  			}
  1730  			if err := a.updateMemoryUsage(ctx, int64(tree.SizeOfDecimal(a.decSum))); err != nil {
  1731  				return err
  1732  			}
  1733  		}
  1734  	}
  1735  	a.seenNonNull = true
  1736  	return nil
  1737  }
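
        // A minimal sketch of the int64-to-decimal hand-off above, assuming plain
        // values rather than datums:
        //
        //   _, ok := arith.AddWithOverflow(math.MaxInt64, 1) // ok == false: overflow
        //   // At this point Add sets large = true, seeds decSum with the int64 sum
        //   // accumulated so far, and keeps summing exactly in apd.Decimal.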
  1738  
  1739  // Result returns the sum.
  1740  func (a *intSumAggregate) Result() (tree.Datum, error) {
  1741  	if !a.seenNonNull {
  1742  		return tree.DNull, nil
  1743  	}
  1744  	dd := &tree.DDecimal{}
  1745  	if a.large {
  1746  		dd.Set(&a.decSum)
  1747  	} else {
  1748  		dd.SetFinite(a.intSum, 0)
  1749  	}
  1750  	return dd, nil
  1751  }
  1752  
  1753  // Reset implements tree.AggregateFunc interface.
  1754  func (a *intSumAggregate) Reset(context.Context) {
  1755  	// We choose not to reset the apd.Decimals since they will be set to
  1756  	// appropriate values when overflow occurs - we simply force the aggregate
  1757  	// to use Go types (at least, at first). That's also why we don't reset the
  1758  	// singleDatumAggregateBase.
  1759  	a.seenNonNull = false
  1760  	a.intSum = 0
  1761  	a.large = false
  1762  }
  1763  
  1764  // Close is part of the tree.AggregateFunc interface.
  1765  func (a *intSumAggregate) Close(ctx context.Context) {
  1766  	a.close(ctx)
  1767  }
  1768  
  1769  // Size is part of the tree.AggregateFunc interface.
  1770  func (a *intSumAggregate) Size() int64 {
  1771  	return sizeOfIntSumAggregate
  1772  }
  1773  
  1774  type decimalSumAggregate struct {
  1775  	singleDatumAggregateBase
  1776  
  1777  	sum        apd.Decimal
  1778  	sawNonNull bool
  1779  }
  1780  
  1781  func newDecimalSumAggregate(
  1782  	_ []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  1783  ) tree.AggregateFunc {
  1784  	return &decimalSumAggregate{singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx)}
  1785  }
  1786  
  1787  // Add adds the value of the passed datum to the sum.
  1788  func (a *decimalSumAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1789  	if datum == tree.DNull {
  1790  		return nil
  1791  	}
  1792  	t := datum.(*tree.DDecimal)
  1793  	_, err := tree.ExactCtx.Add(&a.sum, &a.sum, &t.Decimal)
  1794  	if err != nil {
  1795  		return err
  1796  	}
  1797  
  1798  	if err := a.updateMemoryUsage(ctx, int64(tree.SizeOfDecimal(a.sum))); err != nil {
  1799  		return err
  1800  	}
  1801  
  1802  	a.sawNonNull = true
  1803  	return nil
  1804  }
  1805  
  1806  // Result returns the sum.
  1807  func (a *decimalSumAggregate) Result() (tree.Datum, error) {
  1808  	if !a.sawNonNull {
  1809  		return tree.DNull, nil
  1810  	}
  1811  	dd := &tree.DDecimal{}
  1812  	dd.Set(&a.sum)
  1813  	return dd, nil
  1814  }
  1815  
  1816  // Reset implements tree.AggregateFunc interface.
  1817  func (a *decimalSumAggregate) Reset(ctx context.Context) {
  1818  	a.sum.SetFinite(0, 0)
  1819  	a.sawNonNull = false
  1820  	a.reset(ctx)
  1821  }
  1822  
  1823  // Close is part of the tree.AggregateFunc interface.
  1824  func (a *decimalSumAggregate) Close(ctx context.Context) {
  1825  	a.close(ctx)
  1826  }
  1827  
  1828  // Size is part of the tree.AggregateFunc interface.
  1829  func (a *decimalSumAggregate) Size() int64 {
  1830  	return sizeOfDecimalSumAggregate
  1831  }
  1832  
  1833  type floatSumAggregate struct {
  1834  	sum        float64
  1835  	sawNonNull bool
  1836  }
  1837  
  1838  func newFloatSumAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1839  	return &floatSumAggregate{}
  1840  }
  1841  
  1842  // Add adds the value of the passed datum to the sum.
  1843  func (a *floatSumAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1844  	if datum == tree.DNull {
  1845  		return nil
  1846  	}
  1847  	t := datum.(*tree.DFloat)
  1848  	a.sum += float64(*t)
  1849  	a.sawNonNull = true
  1850  	return nil
  1851  }
  1852  
  1853  // Result returns the sum.
  1854  func (a *floatSumAggregate) Result() (tree.Datum, error) {
  1855  	if !a.sawNonNull {
  1856  		return tree.DNull, nil
  1857  	}
  1858  	return tree.NewDFloat(tree.DFloat(a.sum)), nil
  1859  }
  1860  
  1861  // Reset implements tree.AggregateFunc interface.
  1862  func (a *floatSumAggregate) Reset(context.Context) {
  1863  	a.sawNonNull = false
  1864  	a.sum = 0
  1865  }
  1866  
  1867  // Close is part of the tree.AggregateFunc interface.
  1868  func (a *floatSumAggregate) Close(context.Context) {}
  1869  
  1870  // Size is part of the tree.AggregateFunc interface.
  1871  func (a *floatSumAggregate) Size() int64 {
  1872  	return sizeOfFloatSumAggregate
  1873  }
  1874  
  1875  type intervalSumAggregate struct {
  1876  	sum        duration.Duration
  1877  	sawNonNull bool
  1878  }
  1879  
  1880  func newIntervalSumAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1881  	return &intervalSumAggregate{}
  1882  }
  1883  
  1884  // Add adds the value of the passed datum to the sum.
  1885  func (a *intervalSumAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1886  	if datum == tree.DNull {
  1887  		return nil
  1888  	}
  1889  	t := datum.(*tree.DInterval).Duration
  1890  	a.sum = a.sum.Add(t)
  1891  	a.sawNonNull = true
  1892  	return nil
  1893  }
  1894  
  1895  // Result returns the sum.
  1896  func (a *intervalSumAggregate) Result() (tree.Datum, error) {
  1897  	if !a.sawNonNull {
  1898  		return tree.DNull, nil
  1899  	}
  1900  	return &tree.DInterval{Duration: a.sum}, nil
  1901  }
  1902  
  1903  // Reset implements tree.AggregateFunc interface.
  1904  func (a *intervalSumAggregate) Reset(context.Context) {
  1905  	a.sum = a.sum.Sub(a.sum)
  1906  	a.sawNonNull = false
  1907  }
  1908  
  1909  // Close is part of the tree.AggregateFunc interface.
  1910  func (a *intervalSumAggregate) Close(context.Context) {}
  1911  
  1912  // Size is part of the tree.AggregateFunc interface.
  1913  func (a *intervalSumAggregate) Size() int64 {
  1914  	return sizeOfIntervalSumAggregate
  1915  }
  1916  
  1917  // Read-only constants used for square difference computations.
  1918  var (
  1919  	decimalOne = apd.New(1, 0)
  1920  	decimalTwo = apd.New(2, 0)
  1921  )
  1922  
  1923  type intSqrDiffAggregate struct {
  1924  	agg decimalSqrDiff
  1925  	// Used for passing int64s as *apd.Decimal values.
  1926  	tmpDec tree.DDecimal
  1927  }
  1928  
  1929  func newIntSqrDiff(evalCtx *tree.EvalContext) decimalSqrDiff {
  1930  	return &intSqrDiffAggregate{agg: newDecimalSqrDiff(evalCtx)}
  1931  }
  1932  
  1933  func newIntSqrDiffAggregate(
  1934  	_ []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  1935  ) tree.AggregateFunc {
  1936  	return newIntSqrDiff(evalCtx)
  1937  }
  1938  
  1939  // Count is part of the decimalSqrDiff interface.
  1940  func (a *intSqrDiffAggregate) Count() *apd.Decimal {
  1941  	return a.agg.Count()
  1942  }
  1943  
  1944  // Tmp is part of the decimalSqrDiff interface.
  1945  func (a *intSqrDiffAggregate) Tmp() *apd.Decimal {
  1946  	return a.agg.Tmp()
  1947  }
  1948  
  1949  func (a *intSqrDiffAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1950  	if datum == tree.DNull {
  1951  		return nil
  1952  	}
  1953  
  1954  	a.tmpDec.SetFinite(int64(tree.MustBeDInt(datum)), 0)
  1955  	return a.agg.Add(ctx, &a.tmpDec)
  1956  }
  1957  
  1958  func (a *intSqrDiffAggregate) Result() (tree.Datum, error) {
  1959  	return a.agg.Result()
  1960  }
  1961  
  1962  // Reset implements tree.AggregateFunc interface.
  1963  func (a *intSqrDiffAggregate) Reset(ctx context.Context) {
  1964  	a.agg.Reset(ctx)
  1965  }
  1966  
  1967  // Close is part of the tree.AggregateFunc interface.
  1968  func (a *intSqrDiffAggregate) Close(ctx context.Context) {
  1969  	a.agg.Close(ctx)
  1970  }
  1971  
  1972  // Size is part of the tree.AggregateFunc interface.
  1973  func (a *intSqrDiffAggregate) Size() int64 {
  1974  	return sizeOfIntSqrDiffAggregate
  1975  }
  1976  
  1977  type floatSqrDiffAggregate struct {
  1978  	count   int64
  1979  	mean    float64
  1980  	sqrDiff float64
  1981  }
  1982  
  1983  func newFloatSqrDiff() floatSqrDiff {
  1984  	return &floatSqrDiffAggregate{}
  1985  }
  1986  
  1987  func newFloatSqrDiffAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  1988  	return newFloatSqrDiff()
  1989  }
  1990  
  1991  // Count is part of the floatSqrDiff interface.
  1992  func (a *floatSqrDiffAggregate) Count() int64 {
  1993  	return a.count
  1994  }
  1995  
  1996  func (a *floatSqrDiffAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  1997  	if datum == tree.DNull {
  1998  		return nil
  1999  	}
  2000  	f := float64(*datum.(*tree.DFloat))
  2001  
  2002  	// Uses the Knuth/Welford method for accurately computing squared difference online in a
  2003  	// single pass. Refer to squared difference calculations
  2004  	// in http://www.johndcook.com/blog/standard_deviation/ and
  2005  	// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm.
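        	// For intuition: over the inputs 1, 2, 3 the running (mean, sqrDiff)
        	// pairs are (1, 0), (1.5, 0.5), (2, 2); the final sqrDiff of 2 equals
        	// sum((x - mean)^2) and is what the variance/stddev builtins later
        	// divide by count-1.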
  2006  	a.count++
  2007  	delta := f - a.mean
  2008  	// We are converting an int64 number (with 63-bit precision)
  2009  	// to a float64 (with 52-bit precision), thus in the worst cases,
  2010  	// we may lose up to 11 bits of precision. This was deemed acceptable
  2011  	// considering that we are losing 11 bits on a 52+-bit operation and
  2012  	// that users dealing with floating points should be aware
  2013  	// of floating-point imprecision.
  2014  	a.mean += delta / float64(a.count)
  2015  	a.sqrDiff += delta * (f - a.mean)
  2016  	return nil
  2017  }
  2018  
  2019  func (a *floatSqrDiffAggregate) Result() (tree.Datum, error) {
  2020  	if a.count < 1 {
  2021  		return tree.DNull, nil
  2022  	}
  2023  	return tree.NewDFloat(tree.DFloat(a.sqrDiff)), nil
  2024  }
  2025  
  2026  // Reset implements tree.AggregateFunc interface.
  2027  func (a *floatSqrDiffAggregate) Reset(context.Context) {
  2028  	a.count = 0
  2029  	a.mean = 0
  2030  	a.sqrDiff = 0
  2031  }
  2032  
  2033  // Close is part of the tree.AggregateFunc interface.
  2034  func (a *floatSqrDiffAggregate) Close(context.Context) {}
  2035  
  2036  // Size is part of the tree.AggregateFunc interface.
  2037  func (a *floatSqrDiffAggregate) Size() int64 {
  2038  	return sizeOfFloatSqrDiffAggregate
  2039  }
  2040  
  2041  type decimalSqrDiffAggregate struct {
  2042  	singleDatumAggregateBase
  2043  
  2044  	// Variables used across iterations.
  2045  	ed      *apd.ErrDecimal
  2046  	count   apd.Decimal
  2047  	mean    apd.Decimal
  2048  	sqrDiff apd.Decimal
  2049  
  2050  	// Variables used as scratch space within iterations.
  2051  	delta apd.Decimal
  2052  	tmp   apd.Decimal
  2053  }
  2054  
  2055  func newDecimalSqrDiff(evalCtx *tree.EvalContext) decimalSqrDiff {
  2056  	ed := apd.MakeErrDecimal(tree.IntermediateCtx)
  2057  	return &decimalSqrDiffAggregate{
  2058  		singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx),
  2059  		ed:                       &ed,
  2060  	}
  2061  }
  2062  
  2063  func newDecimalSqrDiffAggregate(
  2064  	_ []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  2065  ) tree.AggregateFunc {
  2066  	return newDecimalSqrDiff(evalCtx)
  2067  }
  2068  
  2069  // Count is part of the decimalSqrDiff interface.
  2070  func (a *decimalSqrDiffAggregate) Count() *apd.Decimal {
  2071  	return &a.count
  2072  }
  2073  
  2074  // Tmp is part of the decimalSqrDiff interface.
  2075  func (a *decimalSqrDiffAggregate) Tmp() *apd.Decimal {
  2076  	return &a.tmp
  2077  }
  2078  
  2079  func (a *decimalSqrDiffAggregate) Add(
  2080  	ctx context.Context, datum tree.Datum, _ ...tree.Datum,
  2081  ) error {
  2082  	if datum == tree.DNull {
  2083  		return nil
  2084  	}
  2085  	d := &datum.(*tree.DDecimal).Decimal
  2086  
  2087  	// Uses the Knuth/Welford method for accurately computing squared difference online in a
  2088  	// single pass. Refer to squared difference calculations
  2089  	// in http://www.johndcook.com/blog/standard_deviation/ and
  2090  	// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm.
  2091  	a.ed.Add(&a.count, &a.count, decimalOne)
  2092  	a.ed.Sub(&a.delta, d, &a.mean)
  2093  	a.ed.Quo(&a.tmp, &a.delta, &a.count)
  2094  	a.ed.Add(&a.mean, &a.mean, &a.tmp)
  2095  	a.ed.Sub(&a.tmp, d, &a.mean)
  2096  	a.ed.Add(&a.sqrDiff, &a.sqrDiff, a.ed.Mul(&a.delta, &a.delta, &a.tmp))
  2097  
  2098  	size := int64(tree.SizeOfDecimal(a.count) +
  2099  		tree.SizeOfDecimal(a.mean) +
  2100  		tree.SizeOfDecimal(a.sqrDiff) +
  2101  		tree.SizeOfDecimal(a.delta) +
  2102  		tree.SizeOfDecimal(a.tmp))
  2103  	if err := a.updateMemoryUsage(ctx, size); err != nil {
  2104  		return err
  2105  	}
  2106  
  2107  	return a.ed.Err()
  2108  }
  2109  
  2110  func (a *decimalSqrDiffAggregate) Result() (tree.Datum, error) {
  2111  	if a.count.Cmp(decimalOne) < 0 {
  2112  		return tree.DNull, nil
  2113  	}
  2114  	dd := &tree.DDecimal{Decimal: a.sqrDiff}
  2115  	// Remove trailing zeros. Depending on the order in which the input
  2116  	// is processed, some number of trailing zeros could be added to the
  2117  	// output. Remove them so that the results are the same regardless of order.
  2118  	dd.Decimal.Reduce(&dd.Decimal)
  2119  	return dd, nil
  2120  }
  2121  
  2122  // Reset implements tree.AggregateFunc interface.
  2123  func (a *decimalSqrDiffAggregate) Reset(ctx context.Context) {
  2124  	a.count.SetFinite(0, 0)
  2125  	a.mean.SetFinite(0, 0)
  2126  	a.sqrDiff.SetFinite(0, 0)
  2127  	a.reset(ctx)
  2128  }
  2129  
  2130  // Close is part of the tree.AggregateFunc interface.
  2131  func (a *decimalSqrDiffAggregate) Close(ctx context.Context) {
  2132  	a.close(ctx)
  2133  }
  2134  
  2135  // Size is part of the tree.AggregateFunc interface.
  2136  func (a *decimalSqrDiffAggregate) Size() int64 {
  2137  	return sizeOfDecimalSqrDiffAggregate
  2138  }
  2139  
  2140  type floatSumSqrDiffsAggregate struct {
  2141  	count   int64
  2142  	mean    float64
  2143  	sqrDiff float64
  2144  }
  2145  
  2146  func newFloatSumSqrDiffs() floatSqrDiff {
  2147  	return &floatSumSqrDiffsAggregate{}
  2148  }
  2149  
  2150  func (a *floatSumSqrDiffsAggregate) Count() int64 {
  2151  	return a.count
  2152  }
  2153  
  2154  // The signature for the datums is:
  2155  //   SQRDIFF (float), SUM (float), COUNT(int)
  2156  func (a *floatSumSqrDiffsAggregate) Add(
  2157  	_ context.Context, sqrDiffD tree.Datum, otherArgs ...tree.Datum,
  2158  ) error {
  2159  	sumD := otherArgs[0]
  2160  	countD := otherArgs[1]
  2161  	if sqrDiffD == tree.DNull || sumD == tree.DNull || countD == tree.DNull {
  2162  		return nil
  2163  	}
  2164  
  2165  	sqrDiff := float64(*sqrDiffD.(*tree.DFloat))
  2166  	sum := float64(*sumD.(*tree.DFloat))
  2167  	count := int64(*countD.(*tree.DInt))
  2168  
  2169  	mean := sum / float64(count)
  2170  	delta := mean - a.mean
  2171  
  2172  	// Compute the sum of Knuth/Welford sum of squared differences from the
  2173  	// mean in a single pass. Adapted from sum of RunningStats in
  2174  	// https://www.johndcook.com/blog/skewness_kurtosis and our
  2175  	// implementation of NumericStats
  2176  	// https://github.com/cockroachdb/cockroach/pull/17728.
  2177  	totalCount, ok := arith.AddWithOverflow(a.count, count)
  2178  	if !ok {
  2179  		return pgerror.Newf(pgcode.NumericValueOutOfRange,
  2180  			"number of values in aggregate exceeds max count of %d", math.MaxInt64,
  2181  		)
  2182  	}
  2183  	// We are converting an int64 number (with 63-bit precision)
  2184  	// to a float64 (with 52-bit precision), thus in the worst cases,
  2185  	// we may lose up to 11 bits of precision. This was deemed acceptable
  2186  	// considering that we are losing 11 bits on a 52+-bit operation and
  2187  	// that users dealing with floating points should be aware
  2188  	// of floating-point imprecision.
  2189  	a.sqrDiff += sqrDiff + delta*delta*float64(count)*float64(a.count)/float64(totalCount)
  2190  	a.count = totalCount
  2191  	a.mean += delta * float64(count) / float64(a.count)
  2192  	return nil
  2193  }
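
        // For intuition: merging the partial states of {1, 2} (count 2, sum 3,
        // sqrdiff 0.5) and {3, 4} (count 2, sum 7, sqrdiff 0.5) into an empty
        // aggregate yields count 4, mean 2.5, and sqrDiff 5, the same
        // squared-difference total a single-pass aggregate over {1, 2, 3, 4}
        // would produce.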
  2194  
  2195  func (a *floatSumSqrDiffsAggregate) Result() (tree.Datum, error) {
  2196  	if a.count < 1 {
  2197  		return tree.DNull, nil
  2198  	}
  2199  	return tree.NewDFloat(tree.DFloat(a.sqrDiff)), nil
  2200  }
  2201  
  2202  // Reset implements tree.AggregateFunc interface.
  2203  func (a *floatSumSqrDiffsAggregate) Reset(context.Context) {
  2204  	a.count = 0
  2205  	a.mean = 0
  2206  	a.sqrDiff = 0
  2207  }
  2208  
  2209  // Close is part of the tree.AggregateFunc interface.
  2210  func (a *floatSumSqrDiffsAggregate) Close(context.Context) {}
  2211  
  2212  // Size is part of the tree.AggregateFunc interface.
  2213  func (a *floatSumSqrDiffsAggregate) Size() int64 {
  2214  	return sizeOfFloatSumSqrDiffsAggregate
  2215  }
  2216  
  2217  type decimalSumSqrDiffsAggregate struct {
  2218  	singleDatumAggregateBase
  2219  
  2220  	// Variables used across iterations.
  2221  	ed      *apd.ErrDecimal
  2222  	count   apd.Decimal
  2223  	mean    apd.Decimal
  2224  	sqrDiff apd.Decimal
  2225  
  2226  	// Variables used as scratch space within iterations.
  2227  	tmpCount apd.Decimal
  2228  	tmpMean  apd.Decimal
  2229  	delta    apd.Decimal
  2230  	tmp      apd.Decimal
  2231  }
  2232  
  2233  func newDecimalSumSqrDiffs(evalCtx *tree.EvalContext) decimalSqrDiff {
  2234  	ed := apd.MakeErrDecimal(tree.IntermediateCtx)
  2235  	return &decimalSumSqrDiffsAggregate{
  2236  		singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx),
  2237  		ed:                       &ed,
  2238  	}
  2239  }
  2240  
  2241  // Count is part of the decimalSqrDiff interface.
  2242  func (a *decimalSumSqrDiffsAggregate) Count() *apd.Decimal {
  2243  	return &a.count
  2244  }
  2245  
  2246  // Tmp is part of the decimalSqrDiff interface.
  2247  func (a *decimalSumSqrDiffsAggregate) Tmp() *apd.Decimal {
  2248  	return &a.tmp
  2249  }
  2250  
  2251  func (a *decimalSumSqrDiffsAggregate) Add(
  2252  	ctx context.Context, sqrDiffD tree.Datum, otherArgs ...tree.Datum,
  2253  ) error {
  2254  	sumD := otherArgs[0]
  2255  	countD := otherArgs[1]
  2256  	if sqrDiffD == tree.DNull || sumD == tree.DNull || countD == tree.DNull {
  2257  		return nil
  2258  	}
  2259  	sqrDiff := &sqrDiffD.(*tree.DDecimal).Decimal
  2260  	sum := &sumD.(*tree.DDecimal).Decimal
  2261  	a.tmpCount.SetInt64(int64(*countD.(*tree.DInt)))
  2262  
  2263  	a.ed.Quo(&a.tmpMean, sum, &a.tmpCount)
  2264  	a.ed.Sub(&a.delta, &a.tmpMean, &a.mean)
  2265  
  2266  	// Compute the sum of Knuth/Welford sum of squared differences from the
  2267  	// mean in a single pass. Adapted from sum of RunningStats in
  2268  	// https://www.johndcook.com/blog/skewness_kurtosis and our
  2269  	// implementation of NumericStats
  2270  	// https://github.com/cockroachdb/cockroach/pull/17728.
  2271  
  2272  	// This is logically equivalent to
  2273  	//   sqrDiff + delta * delta * tmpCount * a.count / (tmpCount + a.count)
  2274  	// where the expression is computed from RIGHT to LEFT.
  2275  	a.ed.Add(&a.tmp, &a.tmpCount, &a.count)
  2276  	a.ed.Quo(&a.tmp, &a.count, &a.tmp)
  2277  	a.ed.Mul(&a.tmp, &a.tmpCount, &a.tmp)
  2278  	a.ed.Mul(&a.tmp, &a.delta, &a.tmp)
  2279  	a.ed.Mul(&a.tmp, &a.delta, &a.tmp)
  2280  	a.ed.Add(&a.tmp, sqrDiff, &a.tmp)
  2281  	// Update running squared difference.
  2282  	a.ed.Add(&a.sqrDiff, &a.sqrDiff, &a.tmp)
  2283  
  2284  	// Update total count.
  2285  	a.ed.Add(&a.count, &a.count, &a.tmpCount)
  2286  
  2287  	// This is logically equivalent to
  2288  	//   delta * tmpCount / a.count
  2289  	// where the expression is computed from LEFT to RIGHT.
  2290  	// Note `a.count` is now the total count (includes tmpCount).
  2291  	a.ed.Mul(&a.tmp, &a.delta, &a.tmpCount)
  2292  	a.ed.Quo(&a.tmp, &a.tmp, &a.count)
  2293  	// Update running mean.
  2294  	a.ed.Add(&a.mean, &a.mean, &a.tmp)
  2295  
  2296  	size := int64(tree.SizeOfDecimal(a.count) +
  2297  		tree.SizeOfDecimal(a.mean) +
  2298  		tree.SizeOfDecimal(a.sqrDiff) +
  2299  		tree.SizeOfDecimal(a.tmpCount) +
  2300  		tree.SizeOfDecimal(a.tmpMean) +
  2301  		tree.SizeOfDecimal(a.delta) +
  2302  		tree.SizeOfDecimal(a.tmp))
  2303  	if err := a.updateMemoryUsage(ctx, size); err != nil {
  2304  		return err
  2305  	}
  2306  
  2307  	return a.ed.Err()
  2308  }
  2309  
  2310  func (a *decimalSumSqrDiffsAggregate) Result() (tree.Datum, error) {
  2311  	if a.count.Cmp(decimalOne) < 0 {
  2312  		return tree.DNull, nil
  2313  	}
  2314  	dd := &tree.DDecimal{Decimal: a.sqrDiff}
  2315  	return dd, nil
  2316  }
  2317  
  2318  // Reset implements tree.AggregateFunc interface.
  2319  func (a *decimalSumSqrDiffsAggregate) Reset(ctx context.Context) {
  2320  	a.count.SetFinite(0, 0)
  2321  	a.mean.SetFinite(0, 0)
  2322  	a.sqrDiff.SetFinite(0, 0)
  2323  	a.reset(ctx)
  2324  }
  2325  
  2326  // Close is part of the tree.AggregateFunc interface.
  2327  func (a *decimalSumSqrDiffsAggregate) Close(ctx context.Context) {
  2328  	a.close(ctx)
  2329  }
  2330  
  2331  // Size is part of the tree.AggregateFunc interface.
  2332  func (a *decimalSumSqrDiffsAggregate) Size() int64 {
  2333  	return sizeOfDecimalSumSqrDiffsAggregate
  2334  }
  2335  
  2336  type floatSqrDiff interface {
  2337  	tree.AggregateFunc
  2338  	Count() int64
  2339  }
  2340  
  2341  type decimalSqrDiff interface {
  2342  	tree.AggregateFunc
  2343  	Count() *apd.Decimal
  2344  	Tmp() *apd.Decimal
  2345  }
  2346  
  2347  type floatVarianceAggregate struct {
  2348  	agg floatSqrDiff
  2349  }
  2350  
  2351  type decimalVarianceAggregate struct {
  2352  	agg decimalSqrDiff
  2353  }
  2354  
  2355  // Both Variance and FinalVariance aggregators have the same codepath for
  2356  // their tree.AggregateFunc interface.
  2357  // The key difference is that Variance employs SqrDiffAggregate which
  2358  // has one input: VALUE; whereas FinalVariance employs SumSqrDiffsAggregate
  2359  // which takes in three inputs: (local) SQRDIFF, SUM, COUNT.
  2360  // FinalVariance is used for local/final aggregation in distsql.
  2361  func newIntVarianceAggregate(
  2362  	params []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  2363  ) tree.AggregateFunc {
  2364  	return &decimalVarianceAggregate{agg: newIntSqrDiff(evalCtx)}
  2365  }
  2366  
  2367  func newFloatVarianceAggregate(
  2368  	_ []*types.T, _ *tree.EvalContext, _ tree.Datums,
  2369  ) tree.AggregateFunc {
  2370  	return &floatVarianceAggregate{agg: newFloatSqrDiff()}
  2371  }
  2372  
  2373  func newDecimalVarianceAggregate(
  2374  	_ []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  2375  ) tree.AggregateFunc {
  2376  	return &decimalVarianceAggregate{agg: newDecimalSqrDiff(evalCtx)}
  2377  }
  2378  
  2379  func newFloatFinalVarianceAggregate(
  2380  	_ []*types.T, _ *tree.EvalContext, _ tree.Datums,
  2381  ) tree.AggregateFunc {
  2382  	return &floatVarianceAggregate{agg: newFloatSumSqrDiffs()}
  2383  }
  2384  
  2385  func newDecimalFinalVarianceAggregate(
  2386  	_ []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  2387  ) tree.AggregateFunc {
  2388  	return &decimalVarianceAggregate{agg: newDecimalSumSqrDiffs(evalCtx)}
  2389  }
  2390  
  2391  // Add is part of the tree.AggregateFunc interface.
  2392  //  Variance: VALUE(float)
  2393  //  FinalVariance: SQRDIFF(float), SUM(float), COUNT(int)
  2394  func (a *floatVarianceAggregate) Add(
  2395  	ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum,
  2396  ) error {
  2397  	return a.agg.Add(ctx, firstArg, otherArgs...)
  2398  }
  2399  
  2400  // Add is part of the tree.AggregateFunc interface.
  2401  //  Variance: VALUE(int|decimal)
  2402  //  FinalVariance: SQRDIFF(decimal), SUM(decimal), COUNT(int)
  2403  func (a *decimalVarianceAggregate) Add(
  2404  	ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum,
  2405  ) error {
  2406  	return a.agg.Add(ctx, firstArg, otherArgs...)
  2407  }
  2408  
  2409  // Result calculates the variance from the member square difference aggregator.
  2410  func (a *floatVarianceAggregate) Result() (tree.Datum, error) {
  2411  	if a.agg.Count() < 2 {
  2412  		return tree.DNull, nil
  2413  	}
  2414  	sqrDiff, err := a.agg.Result()
  2415  	if err != nil {
  2416  		return nil, err
  2417  	}
  2418  	return tree.NewDFloat(tree.DFloat(float64(*sqrDiff.(*tree.DFloat)) / (float64(a.agg.Count()) - 1))), nil
  2419  }
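
        // For example, with the Welford sketch above (inputs 1, 2, 3 and sqrDiff 2),
        // the sample variance is 2/(3-1) = 1.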
  2420  
  2421  // Result calculates the variance from the member square difference aggregator.
  2422  func (a *decimalVarianceAggregate) Result() (tree.Datum, error) {
  2423  	if a.agg.Count().Cmp(decimalTwo) < 0 {
  2424  		return tree.DNull, nil
  2425  	}
  2426  	sqrDiff, err := a.agg.Result()
  2427  	if err != nil {
  2428  		return nil, err
  2429  	}
  2430  	if _, err = tree.IntermediateCtx.Sub(a.agg.Tmp(), a.agg.Count(), decimalOne); err != nil {
  2431  		return nil, err
  2432  	}
  2433  	dd := &tree.DDecimal{}
  2434  	if _, err = tree.DecimalCtx.Quo(&dd.Decimal, &sqrDiff.(*tree.DDecimal).Decimal, a.agg.Tmp()); err != nil {
  2435  		return nil, err
  2436  	}
  2437  	// Remove trailing zeros. Depending on the order in which the input is
  2438  	// processed, some number of trailing zeros could be added to the
  2439  	// output. Remove them so that the results are the same regardless of
  2440  	// order.
  2441  	dd.Decimal.Reduce(&dd.Decimal)
  2442  	return dd, nil
  2443  }
  2444  
  2445  // Reset implements tree.AggregateFunc interface.
  2446  func (a *floatVarianceAggregate) Reset(ctx context.Context) {
  2447  	a.agg.Reset(ctx)
  2448  }
  2449  
  2450  // Close is part of the tree.AggregateFunc interface.
  2451  func (a *floatVarianceAggregate) Close(ctx context.Context) {
  2452  	a.agg.Close(ctx)
  2453  }
  2454  
  2455  // Size is part of the tree.AggregateFunc interface.
  2456  func (a *floatVarianceAggregate) Size() int64 {
  2457  	return sizeOfFloatVarianceAggregate
  2458  }
  2459  
  2460  // Reset implements tree.AggregateFunc interface.
  2461  func (a *decimalVarianceAggregate) Reset(ctx context.Context) {
  2462  	a.agg.Reset(ctx)
  2463  }
  2464  
  2465  // Close is part of the tree.AggregateFunc interface.
  2466  func (a *decimalVarianceAggregate) Close(ctx context.Context) {
  2467  	a.agg.Close(ctx)
  2468  }
  2469  
  2470  // Size is part of the tree.AggregateFunc interface.
  2471  func (a *decimalVarianceAggregate) Size() int64 {
  2472  	return sizeOfDecimalVarianceAggregate
  2473  }
  2474  
  2475  type floatStdDevAggregate struct {
  2476  	agg tree.AggregateFunc
  2477  }
  2478  
  2479  type decimalStdDevAggregate struct {
  2480  	agg tree.AggregateFunc
  2481  }
  2482  
  2483  // Both StdDev and FinalStdDev aggregators have the same codepath for
  2484  // their tree.AggregateFunc interface.
  2485  // The key difference is that StdDev employs SqrDiffAggregate which
  2486  // has one input: VALUE; whereas FinalStdDev employs SumSqrDiffsAggregate
  2487  // which takes in three inputs: (local) SQRDIFF, SUM, COUNT.
  2488  // FinalStdDev is used for local/final aggregation in distsql.
  2489  func newIntStdDevAggregate(
  2490  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
  2491  ) tree.AggregateFunc {
  2492  	return &decimalStdDevAggregate{agg: newIntVarianceAggregate(params, evalCtx, arguments)}
  2493  }
  2494  
  2495  func newFloatStdDevAggregate(
  2496  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
  2497  ) tree.AggregateFunc {
  2498  	return &floatStdDevAggregate{agg: newFloatVarianceAggregate(params, evalCtx, arguments)}
  2499  }
  2500  
  2501  func newDecimalStdDevAggregate(
  2502  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
  2503  ) tree.AggregateFunc {
  2504  	return &decimalStdDevAggregate{agg: newDecimalVarianceAggregate(params, evalCtx, arguments)}
  2505  }
  2506  
  2507  func newFloatFinalStdDevAggregate(
  2508  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
  2509  ) tree.AggregateFunc {
  2510  	return &floatStdDevAggregate{agg: newFloatFinalVarianceAggregate(params, evalCtx, arguments)}
  2511  }
  2512  
  2513  func newDecimalFinalStdDevAggregate(
  2514  	params []*types.T, evalCtx *tree.EvalContext, arguments tree.Datums,
  2515  ) tree.AggregateFunc {
  2516  	return &decimalStdDevAggregate{agg: newDecimalFinalVarianceAggregate(params, evalCtx, arguments)}
  2517  }
  2518  
  2519  // Add implements the tree.AggregateFunc interface.
  2520  // The signature of the datums is:
  2521  //  StdDev: VALUE(float)
  2522  //  FinalStdDev: SQRDIFF(float), SUM(float), COUNT(int)
  2523  func (a *floatStdDevAggregate) Add(
  2524  	ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum,
  2525  ) error {
  2526  	return a.agg.Add(ctx, firstArg, otherArgs...)
  2527  }
  2528  
  2529  // Add is part of the tree.AggregateFunc interface.
  2530  // The signature of the datums is:
  2531  //  StdDev: VALUE(int|decimal)
  2532  //  FinalStdDev: SQRDIFF(decimal), SUM(decimal), COUNT(int)
  2533  func (a *decimalStdDevAggregate) Add(
  2534  	ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum,
  2535  ) error {
  2536  	return a.agg.Add(ctx, firstArg, otherArgs...)
  2537  }
  2538  
  2539  // Result computes the square root of the variance aggregator.
  2540  func (a *floatStdDevAggregate) Result() (tree.Datum, error) {
  2541  	variance, err := a.agg.Result()
  2542  	if err != nil {
  2543  		return nil, err
  2544  	}
  2545  	if variance == tree.DNull {
  2546  		return variance, nil
  2547  	}
  2548  	return tree.NewDFloat(tree.DFloat(math.Sqrt(float64(*variance.(*tree.DFloat))))), nil
  2549  }
  2550  
  2551  // Result computes the square root of the variance aggregator.
  2552  func (a *decimalStdDevAggregate) Result() (tree.Datum, error) {
  2553  	// TODO(richardwu): both decimalVarianceAggregate and
  2554  	// finalDecimalVarianceAggregate return a decimal result with
  2555  	// default tree.DecimalCtx precision. We want to be able to specify that the
  2556  	// varianceAggregate use tree.IntermediateCtx (with the extra precision)
  2557  	// since it is returning an intermediate value for stdDevAggregate (of
  2558  	// which we take the Sqrt).
  2559  	variance, err := a.agg.Result()
  2560  	if err != nil {
  2561  		return nil, err
  2562  	}
  2563  	if variance == tree.DNull {
  2564  		return variance, nil
  2565  	}
  2566  	varianceDec := variance.(*tree.DDecimal)
  2567  	_, err = tree.DecimalCtx.Sqrt(&varianceDec.Decimal, &varianceDec.Decimal)
  2568  	return varianceDec, err
  2569  }
  2570  
  2571  // Reset implements tree.AggregateFunc interface.
  2572  func (a *floatStdDevAggregate) Reset(ctx context.Context) {
  2573  	a.agg.Reset(ctx)
  2574  }
  2575  
  2576  // Close is part of the tree.AggregateFunc interface.
  2577  func (a *floatStdDevAggregate) Close(ctx context.Context) {
  2578  	a.agg.Close(ctx)
  2579  }
  2580  
  2581  // Size is part of the tree.AggregateFunc interface.
  2582  func (a *floatStdDevAggregate) Size() int64 {
  2583  	return sizeOfFloatStdDevAggregate
  2584  }
  2585  
  2586  // Reset implements tree.AggregateFunc interface.
  2587  func (a *decimalStdDevAggregate) Reset(ctx context.Context) {
  2588  	a.agg.Reset(ctx)
  2589  }
  2590  
  2591  // Close is part of the tree.AggregateFunc interface.
  2592  func (a *decimalStdDevAggregate) Close(ctx context.Context) {
  2593  	a.agg.Close(ctx)
  2594  }
  2595  
  2596  // Size is part of the tree.AggregateFunc interface.
  2597  func (a *decimalStdDevAggregate) Size() int64 {
  2598  	return sizeOfDecimalStdDevAggregate
  2599  }
  2600  
  2601  type bytesXorAggregate struct {
  2602  	singleDatumAggregateBase
  2603  
  2604  	sum        []byte
  2605  	sawNonNull bool
  2606  }
  2607  
  2608  func newBytesXorAggregate(
  2609  	_ []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  2610  ) tree.AggregateFunc {
  2611  	return &bytesXorAggregate{singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx)}
  2612  }
  2613  
  2614  // Add inserts one value into the running xor.
  2615  func (a *bytesXorAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
  2616  	if datum == tree.DNull {
  2617  		return nil
  2618  	}
  2619  	t := []byte(*datum.(*tree.DBytes))
  2620  	if !a.sawNonNull {
  2621  		if err := a.updateMemoryUsage(ctx, int64(len(t))); err != nil {
  2622  			return err
  2623  		}
  2624  		a.sum = append([]byte(nil), t...)
  2625  	} else if len(a.sum) != len(t) {
  2626  		return pgerror.Newf(pgcode.InvalidParameterValue,
  2627  			"arguments to xor must all be the same length %d vs %d", len(a.sum), len(t),
  2628  		)
  2629  	} else {
  2630  		for i := range t {
  2631  			a.sum[i] = a.sum[i] ^ t[i]
  2632  		}
  2633  	}
  2634  	a.sawNonNull = true
  2635  	return nil
  2636  }
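
        // For illustration: xor-ing the equal-length inputs b'\x0f\x00' and
        // b'\x01\x02' yields b'\x0e\x02'; inputs of differing lengths produce the
        // InvalidParameterValue error above.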
  2637  
  2638  // Result returns the xor.
  2639  func (a *bytesXorAggregate) Result() (tree.Datum, error) {
  2640  	if !a.sawNonNull {
  2641  		return tree.DNull, nil
  2642  	}
  2643  	return tree.NewDBytes(tree.DBytes(a.sum)), nil
  2644  }
  2645  
  2646  // Reset implements tree.AggregateFunc interface.
  2647  func (a *bytesXorAggregate) Reset(ctx context.Context) {
  2648  	a.sum = nil
  2649  	a.sawNonNull = false
  2650  	a.reset(ctx)
  2651  }
  2652  
  2653  // Close is part of the tree.AggregateFunc interface.
  2654  func (a *bytesXorAggregate) Close(ctx context.Context) {
  2655  	a.close(ctx)
  2656  }
  2657  
  2658  // Size is part of the tree.AggregateFunc interface.
  2659  func (a *bytesXorAggregate) Size() int64 {
  2660  	return sizeOfBytesXorAggregate
  2661  }
  2662  
  2663  type intXorAggregate struct {
  2664  	sum        int64
  2665  	sawNonNull bool
  2666  }
  2667  
  2668  func newIntXorAggregate(_ []*types.T, _ *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  2669  	return &intXorAggregate{}
  2670  }
  2671  
  2672  // Add inserts one value into the running xor.
  2673  func (a *intXorAggregate) Add(_ context.Context, datum tree.Datum, _ ...tree.Datum) error {
  2674  	if datum == tree.DNull {
  2675  		return nil
  2676  	}
  2677  	x := int64(*datum.(*tree.DInt))
  2678  	a.sum = a.sum ^ x
  2679  	a.sawNonNull = true
  2680  	return nil
  2681  }
  2682  
  2683  // Result returns the xor.
  2684  func (a *intXorAggregate) Result() (tree.Datum, error) {
  2685  	if !a.sawNonNull {
  2686  		return tree.DNull, nil
  2687  	}
  2688  	return tree.NewDInt(tree.DInt(a.sum)), nil
  2689  }
  2690  
  2691  // Reset implements tree.AggregateFunc interface.
  2692  func (a *intXorAggregate) Reset(context.Context) {
  2693  	a.sum = 0
  2694  	a.sawNonNull = false
  2695  }
  2696  
  2697  // Close is part of the tree.AggregateFunc interface.
  2698  func (a *intXorAggregate) Close(context.Context) {}
  2699  
  2700  // Size is part of the tree.AggregateFunc interface.
  2701  func (a *intXorAggregate) Size() int64 {
  2702  	return sizeOfIntXorAggregate
  2703  }
  2704  
  2705  type jsonAggregate struct {
  2706  	singleDatumAggregateBase
  2707  
  2708  	loc        *time.Location
  2709  	builder    *json.ArrayBuilderWithCounter
  2710  	sawNonNull bool
  2711  }
  2712  
  2713  func newJSONAggregate(_ []*types.T, evalCtx *tree.EvalContext, _ tree.Datums) tree.AggregateFunc {
  2714  	return &jsonAggregate{
  2715  		singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx),
  2716  		loc:                      evalCtx.GetLocation(),
  2717  		builder:                  json.NewArrayBuilderWithCounter(),
  2718  		sawNonNull:               false,
  2719  	}
  2720  }
  2721  
  2722  // Add accumulates the transformed json into the JSON array.
  2723  func (a *jsonAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.Datum) error {
  2724  	j, err := tree.AsJSON(datum, a.loc)
  2725  	if err != nil {
  2726  		return err
  2727  	}
  2728  	a.builder.Add(j)
  2729  	if err = a.updateMemoryUsage(ctx, int64(a.builder.Size())); err != nil {
  2730  		return err
  2731  	}
  2732  	a.sawNonNull = true
  2733  	return nil
  2734  }
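
        // For illustration: feeding the datums 1, 'a', and NULL in that order builds
        // the JSON array [1, "a", null]. Unlike most aggregates in this file, a NULL
        // input is converted to JSON null rather than skipped, since Add performs no
        // DNull check.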
  2735  
  2736  // Result returns a DJSON from the accumulated JSON array.
  2737  func (a *jsonAggregate) Result() (tree.Datum, error) {
  2738  	if a.sawNonNull {
  2739  		return tree.NewDJSON(a.builder.Build()), nil
  2740  	}
  2741  	return tree.DNull, nil
  2742  }
  2743  
  2744  // Reset implements tree.AggregateFunc interface.
  2745  func (a *jsonAggregate) Reset(ctx context.Context) {
  2746  	a.builder = json.NewArrayBuilderWithCounter()
  2747  	a.sawNonNull = false
  2748  	a.reset(ctx)
  2749  }
  2750  
  2751  // Close allows the aggregate to release the memory it requested during
  2752  // operation.
  2753  func (a *jsonAggregate) Close(ctx context.Context) {
  2754  	a.close(ctx)
  2755  }
  2756  
  2757  // Size is part of the tree.AggregateFunc interface.
  2758  func (a *jsonAggregate) Size() int64 {
  2759  	return sizeOfJSONAggregate
  2760  }
  2761  
  2762  // validateInputFractions validates that the input is either a single fraction
  2763  // or an array of fractions, each in [0, 1], and returns them as a slice.
  2764  func validateInputFractions(datum tree.Datum) ([]float64, bool, error) {
  2765  	fractions := make([]float64, 0)
  2766  	singleInput := false
  2767  
  2768  	validate := func(fraction float64) error {
  2769  		if fraction < 0 || fraction > 1.0 {
  2770  			return pgerror.Newf(pgcode.NumericValueOutOfRange,
  2771  				"percentile value %f is not between 0 and 1", fraction)
  2772  		}
  2773  		return nil
  2774  	}
  2775  
  2776  	if datum.ResolvedType().Identical(types.Float) {
  2777  		fraction := float64(tree.MustBeDFloat(datum))
  2778  		singleInput = true
  2779  		if err := validate(fraction); err != nil {
  2780  			return nil, false, err
  2781  		}
  2782  		fractions = append(fractions, fraction)
  2783  	} else if datum.ResolvedType().Equivalent(types.FloatArray) {
  2784  		fractionsDatum := tree.MustBeDArray(datum)
  2785  		for _, f := range fractionsDatum.Array {
  2786  			fraction := float64(tree.MustBeDFloat(f))
  2787  			if err := validate(fraction); err != nil {
  2788  				return nil, false, err
  2789  			}
  2790  			fractions = append(fractions, fraction)
  2791  		}
  2792  	} else {
  2793  		panic(fmt.Sprintf("unexpected input type, %s", datum.ResolvedType()))
  2794  	}
  2795  	return fractions, singleInput, nil
  2796  }
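
        // For illustration, assuming direct calls with already-constructed datums
        // (floatArrayOf is a hypothetical helper standing in for a *tree.DArray of
        // FLOAT8 values):
        //
        //   validateInputFractions(tree.NewDFloat(0.5))       -> ([0.5], true, nil)
        //   validateInputFractions(floatArrayOf(0.25, 0.75))  -> ([0.25, 0.75], false, nil)
        //   validateInputFractions(tree.NewDFloat(1.5))       -> error: not between 0 and 1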
  2797  
  2798  type percentileDiscAggregate struct {
  2799  	arr *tree.DArray
  2800  	// Note that we do not embed the singleDatumAggregateBase struct for memory
  2801  	// accounting because percentileDiscAggregate stores multiple datums inside
  2802  	// arr; a mon.BoundAccount is used instead.
  2803  	acc mon.BoundAccount
  2804  	// We need singleInput to differentiate whether the input was a single
  2805  	// fraction, or an array of fractions.
  2806  	singleInput bool
  2807  	fractions   []float64
  2808  }
  2809  
  2810  func newPercentileDiscAggregate(
  2811  	params []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  2812  ) tree.AggregateFunc {
  2813  	return &percentileDiscAggregate{
  2814  		arr: tree.NewDArray(params[1]),
  2815  		acc: evalCtx.Mon.MakeBoundAccount(),
  2816  	}
  2817  }
  2818  
  2819  // Add stores the input percentile and all the values to calculate the discrete percentile.
  2820  func (a *percentileDiscAggregate) Add(
  2821  	ctx context.Context, datum tree.Datum, others ...tree.Datum,
  2822  ) error {
  2823  	if len(a.fractions) == 0 && datum != tree.DNull {
  2824  		fractions, singleInput, err := validateInputFractions(datum)
  2825  		if err != nil {
  2826  			return err
  2827  		}
  2828  		a.fractions = fractions
  2829  		a.singleInput = singleInput
  2830  	}
  2831  
  2832  	if len(others) == 1 && others[0] != tree.DNull {
  2833  		if err := a.acc.Grow(ctx, int64(others[0].Size())); err != nil {
  2834  			return err
  2835  		}
  2836  		return a.arr.Append(others[0])
  2837  	} else if len(others) != 1 {
  2838  		panic(fmt.Sprintf("unexpected number of other datums passed in, expected 1, got %d", len(others)))
  2839  	}
  2840  	return nil
  2841  }
  2842  
  2843  // Result finds the discrete percentile.
  2844  func (a *percentileDiscAggregate) Result() (tree.Datum, error) {
  2845  	// Return null if there are no values.
  2846  	if a.arr.Len() == 0 {
  2847  		return tree.DNull, nil
  2848  	}
  2849  
  2850  	if len(a.fractions) > 0 {
  2851  		res := tree.NewDArray(a.arr.ParamTyp)
  2852  		for _, fraction := range a.fractions {
  2853  			// If a zero fraction is specified, return the first value; otherwise
  2854  			// compute the row index, accounting for zero-based indexing.
  2855  			if fraction == 0.0 {
  2856  				if err := res.Append(a.arr.Array[0]); err != nil {
  2857  					return nil, err
  2858  				}
  2859  				continue
  2860  			}
  2861  			// Use math.Ceil since we want the first value whose position equals or
  2862  			// exceeds the specified fraction.
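        			// For example, with 4 ordered values and fraction 0.5 this picks
        			// index ceil(0.5*4)-1 = 1, i.e. the second value.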
  2863  			rowIndex := int(math.Ceil(fraction*float64(a.arr.Len()))) - 1
  2864  			if err := res.Append(a.arr.Array[rowIndex]); err != nil {
  2865  				return nil, err
  2866  			}
  2867  		}
  2868  
  2869  		if a.singleInput {
  2870  			return res.Array[0], nil
  2871  		}
  2872  		return res, nil
  2873  	}
  2874  	panic("input must either be a single fraction or an array of fractions")
  2875  }
  2876  
  2877  // Reset implements tree.AggregateFunc interface.
  2878  func (a *percentileDiscAggregate) Reset(ctx context.Context) {
  2879  	a.arr = tree.NewDArray(a.arr.ParamTyp)
  2880  	a.acc.Empty(ctx)
  2881  	a.singleInput = false
  2882  	a.fractions = a.fractions[:0]
  2883  }
  2884  
  2885  // Close allows the aggregate to release the memory it requested during
  2886  // operation.
  2887  func (a *percentileDiscAggregate) Close(ctx context.Context) {
  2888  	a.acc.Close(ctx)
  2889  }
  2890  
  2891  // Size is part of the tree.AggregateFunc interface.
  2892  func (a *percentileDiscAggregate) Size() int64 {
  2893  	return sizeOfPercentileDiscAggregate
  2894  }
  2895  
  2896  type percentileContAggregate struct {
  2897  	arr *tree.DArray
  2898  	// Note that we do not embed the singleDatumAggregateBase struct to help with
  2899  	// memory accounting, because percentileContAggregate stores multiple datums
  2900  	// inside arr rather than a single one.
  2901  	acc mon.BoundAccount
  2902  	// We need singleInput to distinguish whether the input was a single
  2903  	// fraction or an array of fractions.
  2904  	singleInput bool
  2905  	fractions   []float64
  2906  }
  2907  
  2908  func newPercentileContAggregate(
  2909  	params []*types.T, evalCtx *tree.EvalContext, _ tree.Datums,
  2910  ) tree.AggregateFunc {
  2911  	return &percentileContAggregate{
  2912  		arr: tree.NewDArray(params[1]),
  2913  		acc: evalCtx.Mon.MakeBoundAccount(),
  2914  	}
  2915  }
  2916  
  2917  // Add stores the input fraction(s) and all the values needed to calculate the continuous percentile.
  2918  func (a *percentileContAggregate) Add(
  2919  	ctx context.Context, datum tree.Datum, others ...tree.Datum,
  2920  ) error {
  2921  	if len(a.fractions) == 0 && datum != tree.DNull {
  2922  		fractions, singleInput, err := validateInputFractions(datum)
  2923  		if err != nil {
  2924  			return err
  2925  		}
  2926  		a.fractions = fractions
  2927  		a.singleInput = singleInput
  2928  	}
  2929  
  2930  	if len(others) == 1 && others[0] != tree.DNull {
  2931  		if err := a.acc.Grow(ctx, int64(others[0].Size())); err != nil {
  2932  			return err
  2933  		}
  2934  		return a.arr.Append(others[0])
  2935  	} else if len(others) != 1 {
  2936  		panic(fmt.Sprintf("unexpected number of other datums passed in, expected 1, got %d", len(others)))
  2937  	}
  2938  	return nil
  2939  }
  2940  
  2941  // Result finds the continuous percentile.
  2942  func (a *percentileContAggregate) Result() (tree.Datum, error) {
  2943  	// Return null if there are no values.
  2944  	if a.arr.Len() == 0 {
  2945  		return tree.DNull, nil
  2946  	}
  2947  	// The following is the formula for calculating the continuous percentile:
  2948  	// RN = 1 + (fraction * (frameSize - 1))
  2949  	// CRN = Ceil(RN)
  2950  	// FRN = Floor(RN)
  2951  	// If (CRN = FRN = RN) then the result is:
  2952  	//   (value at row RN)
  2953  	// Otherwise the result is:
  2954  	//   (CRN - RN) * (value at row FRN) +
  2955  	//   (RN - FRN) * (value at row CRN)
  2956  	// Adapted from:
  2957  	//   https://docs.oracle.com/cd/B19306_01/server.102/b14200/functions110.htm
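        	// For example, with 4 ordered values and fraction 0.5: RN = 1 + 0.5*3 = 2.5,
        	// FRN = 2, CRN = 3, so the result is 0.5*(value at row 2) + 0.5*(value at row 3).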
  2958  	// If the input was a single fraction, a single value (not an array) is returned.
  2959  	if len(a.fractions) > 0 {
  2960  		res := tree.NewDArray(a.arr.ParamTyp)
  2961  		for _, fraction := range a.fractions {
  2962  			rowNumber := 1.0 + (fraction * (float64(a.arr.Len()) - 1.0))
  2963  			ceilRowNumber := int(math.Ceil(rowNumber))
  2964  			floorRowNumber := int(math.Floor(rowNumber))
  2965  
  2966  			if a.arr.ParamTyp.Identical(types.Float) {
  2967  				var target float64
  2968  				if rowNumber == float64(ceilRowNumber) && rowNumber == float64(floorRowNumber) {
  2969  					target = float64(tree.MustBeDFloat(a.arr.Array[int(rowNumber)-1]))
  2970  				} else {
  2971  					floorValue := float64(tree.MustBeDFloat(a.arr.Array[floorRowNumber-1]))
  2972  					ceilValue := float64(tree.MustBeDFloat(a.arr.Array[ceilRowNumber-1]))
  2973  					target = (float64(ceilRowNumber)-rowNumber)*floorValue +
  2974  						(rowNumber-float64(floorRowNumber))*ceilValue
  2975  				}
  2976  				if err := res.Append(tree.NewDFloat(tree.DFloat(target))); err != nil {
  2977  					return nil, err
  2978  				}
  2979  			} else if a.arr.ParamTyp.Family() == types.IntervalFamily {
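        				// When interpolation is needed, the neighboring intervals are converted
        				// to float64 via AsFloat64, combined linearly, and converted back with
        				// duration.FromFloat64.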
  2980  				var target *tree.DInterval
  2981  				if rowNumber == float64(ceilRowNumber) && rowNumber == float64(floorRowNumber) {
  2982  					target = tree.MustBeDInterval(a.arr.Array[int(rowNumber)-1])
  2983  				} else {
  2984  					floorValue := tree.MustBeDInterval(a.arr.Array[floorRowNumber-1]).AsFloat64()
  2985  					ceilValue := tree.MustBeDInterval(a.arr.Array[ceilRowNumber-1]).AsFloat64()
  2986  					targetDuration := duration.FromFloat64(
  2987  						(float64(ceilRowNumber)-rowNumber)*floorValue +
  2988  							(rowNumber-float64(floorRowNumber))*ceilValue)
  2989  					target = tree.NewDInterval(targetDuration, types.DefaultIntervalTypeMetadata)
  2990  				}
  2991  				if err := res.Append(target); err != nil {
  2992  					return nil, err
  2993  				}
  2994  			} else {
  2995  				panic(fmt.Sprintf("argument type must be float or interval, got %s", a.arr.ParamTyp.String()))
  2996  			}
  2997  		}
  2998  		if a.singleInput {
  2999  			return res.Array[0], nil
  3000  		}
  3001  		return res, nil
  3002  	}
  3003  	panic("input must be either a single fraction or an array of fractions")
  3004  }
  3005  
  3006  // Reset implements the tree.AggregateFunc interface.
  3007  func (a *percentileContAggregate) Reset(ctx context.Context) {
  3008  	a.arr = tree.NewDArray(a.arr.ParamTyp)
  3009  	a.acc.Empty(ctx)
  3010  	a.singleInput = false
  3011  	a.fractions = a.fractions[:0]
  3012  }
  3013  
  3014  // Close allows the aggregate to release the memory it requested during
  3015  // operation.
  3016  func (a *percentileContAggregate) Close(ctx context.Context) {
  3017  	a.acc.Close(ctx)
  3018  }
  3019  
  3020  // Size is part of the tree.AggregateFunc interface.
  3021  func (a *percentileContAggregate) Size() int64 {
  3022  	return sizeOfPercentileContAggregate
  3023  }