gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/pkg/prometheus/prometheus_test.go

     1  // Copyright 2022 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package prometheus
    16  
    17  import (
    18  	"bytes"
    19  	"errors"
    20  	"fmt"
    21  	"io"
    22  	"math"
    23  	"strings"
    24  	"sync"
    25  	"testing"
    26  	"time"
    27  	"unicode"
    28  
    29  	v1proto "github.com/golang/protobuf/proto"
    30  	"github.com/google/go-cmp/cmp"
    31  	"github.com/prometheus/common/expfmt"
    32  	"google.golang.org/protobuf/encoding/prototext"
    33  	"google.golang.org/protobuf/proto"
    34  	"google.golang.org/protobuf/reflect/protoreflect"
    35  	"google.golang.org/protobuf/testing/protocmp"
    36  	pb "gvisor.dev/gvisor/pkg/metric/metric_go_proto"
    37  )
    38  
    39  // timeNowMu is used to synchronize injection of time.Now.
    40  var timeNowMu sync.Mutex
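
         // timeNow is a package-level hook (declared elsewhere in this package)
         // that is presumably consulted in place of time.Now by the code under
         // test; at() temporarily overrides it so tests run against an injected
         // clock.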
    41  
    42  // at executes a function with the clock returning a given time.
    43  func at(when time.Time, f func()) {
    44  	timeNowMu.Lock()
    45  	defer timeNowMu.Unlock()
    46  	previousFunc := timeNow
    47  	timeNow = func() time.Time { return when }
    48  	defer func() { timeNow = previousFunc }()
    49  	f()
    50  }
    51  
    52  // newSnapshotAt creates a new Snapshot with the given timestamp.
    53  func newSnapshotAt(when time.Time) *Snapshot {
    54  	var s *Snapshot
    55  	at(when, func() {
    56  		s = NewSnapshot()
    57  	})
    58  	return s
    59  }
    60  
     61  // metricMetadata pairs metric metadata with field values stored on the side.
    62  type metricMetadata struct {
    63  	PB     *pb.MetricMetadata
    64  	Fields map[string]string
    65  }
    66  
    67  func (m *metricMetadata) clone() *metricMetadata {
    68  	m2 := &metricMetadata{
    69  		PB:     &pb.MetricMetadata{},
    70  		Fields: make(map[string]string, len(m.Fields)),
    71  	}
    72  	proto.Merge(m2.PB, m.PB)
    73  	for k, v := range m.Fields {
    74  		m2.Fields[k] = v
    75  	}
    76  	return m2
    77  }
    78  
     79  // withField returns a copy of this *metricMetadata with the given fields
     80  // appended to its metadata.
     81  func (m *metricMetadata) withField(fields ...*pb.MetricMetadata_Field) *metricMetadata {
     82  	m2 := m.clone()
     83  	m2.PB.Fields = make([]*pb.MetricMetadata_Field, len(m.PB.Fields), len(m.PB.Fields)+len(fields))
     84  	copy(m2.PB.Fields, m.PB.Fields)
    85  	m2.PB.Fields = append(m2.PB.Fields, fields...)
    86  	return m2
    87  }
    88  
    89  // metric returns the Metric metadata struct for this metric metadata.
    90  func (m *metricMetadata) metric() *Metric {
    91  	var metricType Type
    92  	switch m.PB.GetType() {
    93  	case pb.MetricMetadata_TYPE_UINT64:
    94  		if m.PB.GetCumulative() {
    95  			metricType = TypeCounter
    96  		} else {
    97  			metricType = TypeGauge
    98  		}
    99  	case pb.MetricMetadata_TYPE_DISTRIBUTION:
   100  		metricType = TypeHistogram
   101  	default:
   102  		panic(fmt.Sprintf("invalid type %v", m.PB.GetType()))
   103  	}
   104  	return &Metric{
   105  		Name: m.PB.GetPrometheusName(),
   106  		Type: metricType,
   107  		Help: m.PB.GetDescription(),
   108  	}
   109  }
   110  
   111  // Convenient metric field metadata definitions.
   112  var (
   113  	field1 = &pb.MetricMetadata_Field{
   114  		FieldName:     "field1",
   115  		AllowedValues: []string{"val1a", "val1b"},
   116  	}
   117  	field2 = &pb.MetricMetadata_Field{
   118  		FieldName:     "field2",
   119  		AllowedValues: []string{"val2a", "val2b"},
   120  	}
   121  )
   122  
   123  // fieldVal returns a copy of this *metricMetadata with the given field-value
   124  // stored on the side of the metadata. Meant to be used during snapshot data
   125  // construction, where methods like int() make it easy to construct *Data
   126  // structs with field values.
   127  func (m *metricMetadata) fieldVal(field *pb.MetricMetadata_Field, val string) *metricMetadata {
   128  	return m.fieldVals(map[*pb.MetricMetadata_Field]string{field: val})
   129  }
   130  
   131  // fieldVals acts like fieldVal but for multiple fields, at the expense of
   132  // having a less convenient function signature.
   133  func (m *metricMetadata) fieldVals(fieldToVal map[*pb.MetricMetadata_Field]string) *metricMetadata {
   134  	m2 := m.clone()
   135  	for field, val := range fieldToVal {
   136  		m2.Fields[field.GetFieldName()] = val
   137  	}
   138  	return m2
   139  }
   140  
   141  // labels returns a label key-value map associated with the metricMetadata.
   142  func (m *metricMetadata) labels() map[string]string {
   143  	if len(m.Fields) == 0 {
   144  		return nil
   145  	}
   146  	return m.Fields
   147  }
   148  
    149  // int returns a new Data struct with the given integer value for the current
    150  // metric. The Data is labeled with any field values previously recorded via
    151  // fieldVal or fieldVals; metrics with no recorded field values produce
    152  // unlabeled Data.
   153  func (m *metricMetadata) int(val int64) *Data {
   154  	data := NewIntData(m.metric(), val)
   155  	data.Labels = m.labels()
   156  	return data
   157  }
   158  
    159  // float returns a new Data struct with the given floating-point value for the
    160  // current metric. The Data is labeled with any field values previously
    161  // recorded via fieldVal or fieldVals; metrics with no recorded field values
    162  // produce unlabeled Data.
   163  func (m *metricMetadata) float(val float64) *Data {
   164  	data := NewFloatData(m.metric(), val)
   165  	data.Labels = m.labels()
   166  	return data
   167  }
   168  
    169  // dist returns a new Data struct holding a histogram built from the given
    170  // samples for the current metric, labeled with any field values previously
    171  // recorded via fieldVal or fieldVals. Bucket boundaries are taken from the
    172  // metric's DistributionBucketLowerBounds, with a final +Inf overflow bucket.
   173  func (m *metricMetadata) dist(samples ...int64) *Data {
   174  	var total, min, max int64
   175  	var ssd float64
   176  	buckets := make([]Bucket, len(m.PB.GetDistributionBucketLowerBounds())+1)
   177  	var bucket *Bucket
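         	// Each finite bucket's upper bound equals the corresponding declared
         	// lower bound (matching the "le" labels in the expected Prometheus
         	// output later in this file); the last bucket is the +Inf overflow
         	// bucket.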
    178  	for i, lowerBound := range m.PB.GetDistributionBucketLowerBounds() {
    179  		buckets[i].UpperBound = Number{Int: lowerBound}
    180  	}
    181  	buckets[len(buckets)-1].UpperBound = Number{Float: math.Inf(1)}
   182  	for i, sample := range samples {
   183  		if i == 0 {
   184  			min = sample
   185  			max = sample
   186  		} else {
   187  			if sample < min {
   188  				min = sample
   189  			}
   190  			if sample > max {
   191  				max = sample
   192  			}
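         			// Accumulate this sample's contribution to the sum of squared
         			// deviations incrementally.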
   193  			oldMean := float64(total) / float64(i+1)
   194  			newMean := float64(total+sample) / float64(i+2)
   195  			ssd += (float64(sample) - oldMean) * (float64(sample) - newMean)
   196  		}
   197  		total += sample
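         		// Pick the bucket this sample falls into: the bucket just past the
         		// last lower bound that the sample meets or exceeds.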
   198  		bucket = &buckets[0]
   199  		for i, lowerBound := range m.PB.GetDistributionBucketLowerBounds() {
   200  			if sample >= lowerBound {
   201  				bucket = &buckets[i+1]
   202  			} else {
   203  				break
   204  			}
   205  		}
   206  		bucket.Samples++
   207  	}
   208  	return &Data{
   209  		Metric: m.metric(),
   210  		Labels: m.labels(),
   211  		HistogramValue: &Histogram{
   212  			Total:                  Number{Int: total},
   213  			Buckets:                buckets,
   214  			Min:                    Number{Int: min},
   215  			Max:                    Number{Int: max},
   216  			SumOfSquaredDeviations: Number{Float: ssd},
   217  		},
   218  	}
   219  }
   220  
   221  // Convenient metric metadata definitions.
   222  var (
   223  	fooInt = &metricMetadata{
   224  		PB: &pb.MetricMetadata{
   225  			Name:           "fooInt",
   226  			PrometheusName: "foo_int",
   227  			Description:    "An integer about foo",
   228  			Cumulative:     false,
   229  			Units:          pb.MetricMetadata_UNITS_NONE,
   230  			Sync:           true,
   231  			Type:           pb.MetricMetadata_TYPE_UINT64,
   232  		},
   233  	}
   234  	fooCounter = &metricMetadata{
   235  		PB: &pb.MetricMetadata{
   236  			Name:           "fooCounter",
   237  			PrometheusName: "foo_counter",
   238  			Description:    "A counter of foos",
   239  			Cumulative:     true,
   240  			Units:          pb.MetricMetadata_UNITS_NONE,
   241  			Sync:           true,
   242  			Type:           pb.MetricMetadata_TYPE_UINT64,
   243  		},
   244  	}
   245  	fooDist = &metricMetadata{
   246  		PB: &pb.MetricMetadata{
   247  			Name:                          "fooDist",
   248  			PrometheusName:                "foo_dist",
   249  			Description:                   "A distribution about foo",
   250  			Cumulative:                    false,
   251  			Units:                         pb.MetricMetadata_UNITS_NONE,
   252  			Sync:                          true,
   253  			Type:                          pb.MetricMetadata_TYPE_DISTRIBUTION,
   254  			DistributionBucketLowerBounds: []int64{0, 1, 2, 4, 8},
   255  		},
   256  	}
   257  )
   258  
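         // A typical composition of the helpers above (illustrative; it uses the
         // metrics and fields defined in this file):
         //
         //	fooDist.fieldVal(field1, "val1a").dist(1, 2, 3)
         //
         // builds a *Data for foo_dist carrying the label field1="val1a" and a
         // histogram whose buckets, total, min, max, and sum-of-squared-deviations
         // are derived from the samples 1, 2, and 3.
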
   259  // newMetricRegistration returns a new *metricRegistration.
   260  func newMetricRegistration(metricMetadata ...*metricMetadata) *pb.MetricRegistration {
   261  	metadatas := make([]*pb.MetricMetadata, len(metricMetadata))
   262  	for i, mm := range metricMetadata {
   263  		metadatas[i] = mm.PB
   264  	}
   265  	return &pb.MetricRegistration{
   266  		Metrics: metadatas,
   267  	}
   268  }
   269  
   270  func TestVerifier(t *testing.T) {
   271  	testStart := time.Now()
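         	// epsilon returns testStart shifted by n milliseconds; test cases use
         	// negative offsets for snapshots taken before the simulated "now".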
   272  	epsilon := func(n int) time.Time {
   273  		return testStart.Add(time.Duration(n) * time.Millisecond)
   274  	}
   275  	for _, test := range []struct {
   276  		Name string
   277  		// At is the time at which the test executes.
   278  		// If unset, `testStart` is assumed.
   279  		At time.Time
   280  		// Registration is the metric registration data.
   281  		Registration *pb.MetricRegistration
   282  		// WantVerifierCreationErr is true if the test expects the
   283  		// creation of the Verifier to fail. All the fields below it
   284  		// are ignored in this case.
   285  		WantVerifierCreationErr bool
   286  		// WantSuccess is a sequence of Snapshots to present to
   287  		// the verifier. The test expects all of them to pass verification.
   288  		// If unset, the test simply presents the WantFail Snapshot.
   289  		// If both WantSuccess and WantFail are unset, the test presents
   290  		// an empty snapshot and expects it to succeed.
   291  		WantSuccess []*Snapshot
   292  		// WantFail is a Snapshot to present to the verifier after all
   293  		// snapshots in WantSuccess have been presented.
   294  		// The test expects this Snapshot to fail verification.
   295  		// If unset, the test does not present any snapshot after
   296  		// having presented the WantSuccess Snapshots.
   297  		WantFail *Snapshot
   298  	}{
   299  		{
   300  			Name: "no metrics, empty snapshot",
   301  		},
   302  		{
   303  			Name:                    "duplicate metric",
   304  			Registration:            newMetricRegistration(fooInt, fooInt),
   305  			WantVerifierCreationErr: true,
   306  		},
   307  		{
   308  			Name:                    "duplicate metric with different field set",
   309  			Registration:            newMetricRegistration(fooInt, fooInt.withField(field1)),
   310  			WantVerifierCreationErr: true,
   311  		},
   312  		{
   313  			Name:                    "duplicate field in metric",
   314  			Registration:            newMetricRegistration(fooInt.withField(field1, field1)),
   315  			WantVerifierCreationErr: true,
   316  		},
   317  		{
   318  			Name: "no field allowed value",
   319  			Registration: newMetricRegistration(fooInt.withField(&pb.MetricMetadata_Field{
   320  				FieldName: "field1",
   321  			})),
   322  			WantVerifierCreationErr: true,
   323  		},
   324  		{
   325  			Name: "duplicate field allowed value",
   326  			Registration: newMetricRegistration(fooInt.withField(&pb.MetricMetadata_Field{
   327  				FieldName:     "field1",
   328  				AllowedValues: []string{"val1", "val1"},
   329  			})),
   330  			WantVerifierCreationErr: true,
   331  		},
   332  		{
   333  			Name: "invalid metric type",
   334  			Registration: newMetricRegistration(&metricMetadata{
   335  				PB: &pb.MetricMetadata{
   336  					Name:           "fooBar",
   337  					PrometheusName: "foo_bar",
   338  					Type:           pb.MetricMetadata_Type(1337),
   339  				}},
   340  			),
   341  			WantVerifierCreationErr: true,
   342  		},
   343  		{
   344  			Name: "empty metric name",
   345  			Registration: newMetricRegistration(&metricMetadata{
   346  				PB: &pb.MetricMetadata{
   347  					PrometheusName: "foo_bar",
   348  					Type:           pb.MetricMetadata_TYPE_UINT64,
   349  				}},
   350  			),
   351  			WantVerifierCreationErr: true,
   352  		},
   353  		{
   354  			Name: "empty Prometheus metric name",
   355  			Registration: newMetricRegistration(&metricMetadata{
   356  				PB: &pb.MetricMetadata{
   357  					Name: "fooBar",
   358  					Type: pb.MetricMetadata_TYPE_UINT64,
   359  				}},
   360  			),
   361  			WantVerifierCreationErr: true,
   362  		},
   363  		{
   364  			Name: "bad Prometheus metric name",
   365  			Registration: newMetricRegistration(&metricMetadata{
   366  				PB: &pb.MetricMetadata{
   367  					Name:           "fooBar",
   368  					PrometheusName: "fooBar",
   369  					Type:           pb.MetricMetadata_TYPE_UINT64,
   370  				}},
   371  			),
   372  			WantVerifierCreationErr: true,
   373  		},
   374  		{
   375  			Name: "bad first Prometheus metric name character",
   376  			Registration: newMetricRegistration(&metricMetadata{
   377  				PB: &pb.MetricMetadata{
   378  					Name:           "fooBar",
   379  					PrometheusName: "_foo_bar",
   380  					Type:           pb.MetricMetadata_TYPE_UINT64,
   381  				}},
   382  			),
   383  			WantVerifierCreationErr: true,
   384  		},
   385  		{
   386  			Name: "Prometheus metric name starts with reserved prefix",
   387  			Registration: newMetricRegistration(&metricMetadata{
   388  				PB: &pb.MetricMetadata{
   389  					Name:           "metaFooBar",
   390  					PrometheusName: "meta_foo_bar",
   391  					Type:           pb.MetricMetadata_TYPE_UINT64,
   392  				}},
   393  			),
   394  			WantVerifierCreationErr: true,
   395  		},
   396  		{
    397  			Name: "Prometheus metric name does not start with reserved prefix but non-Prometheus metric name does",
   398  			Registration: newMetricRegistration(&metricMetadata{
   399  				PB: &pb.MetricMetadata{
   400  					Name:           "metaFooBar",
   401  					PrometheusName: "not_meta_foo_bar",
   402  					Type:           pb.MetricMetadata_TYPE_UINT64,
   403  				}},
   404  			),
   405  			WantVerifierCreationErr: false,
   406  		},
   407  		{
   408  			Name: "Prometheus metric name matches reserved one",
   409  			Registration: newMetricRegistration(&metricMetadata{
   410  				PB: &pb.MetricMetadata{
   411  					Name:           "doesNotMatter",
   412  					PrometheusName: ProcessStartTimeSeconds.Name,
   413  					Type:           pb.MetricMetadata_TYPE_UINT64,
   414  				}},
   415  			),
   416  			WantVerifierCreationErr: true,
   417  		},
   418  		{
   419  			Name: "no buckets",
   420  			Registration: newMetricRegistration(&metricMetadata{
   421  				PB: &pb.MetricMetadata{
   422  					Name:                          "fooBar",
   423  					PrometheusName:                "foo_bar",
   424  					Type:                          pb.MetricMetadata_TYPE_DISTRIBUTION,
   425  					DistributionBucketLowerBounds: []int64{},
   426  				}},
   427  			),
   428  			WantVerifierCreationErr: true,
   429  		},
   430  		{
   431  			Name: "too many buckets",
   432  			Registration: newMetricRegistration(&metricMetadata{
   433  				PB: &pb.MetricMetadata{
   434  					Name:                          "fooBar",
   435  					PrometheusName:                "foo_bar",
   436  					Type:                          pb.MetricMetadata_TYPE_DISTRIBUTION,
   437  					DistributionBucketLowerBounds: make([]int64, 999),
   438  				}},
   439  			),
   440  			WantVerifierCreationErr: true,
   441  		},
   442  		{
   443  			Name: "successful registration of complex set of metrics",
   444  			Registration: newMetricRegistration(
   445  				fooInt,
   446  				fooCounter.withField(field1, field2),
   447  				fooDist.withField(field2),
   448  			),
   449  		},
   450  		{
   451  			Name: "snapshot time ordering",
   452  			At:   epsilon(0),
   453  			WantSuccess: []*Snapshot{
   454  				newSnapshotAt(epsilon(-3)),
   455  				newSnapshotAt(epsilon(-2)),
   456  				newSnapshotAt(epsilon(-1)),
   457  			},
   458  			WantFail: newSnapshotAt(epsilon(-2)),
   459  		},
   460  		{
   461  			Name: "same snapshot time is ok",
   462  			At:   epsilon(0),
   463  			WantSuccess: []*Snapshot{
   464  				newSnapshotAt(epsilon(-3)),
   465  				newSnapshotAt(epsilon(-2)),
   466  				newSnapshotAt(epsilon(-1)),
   467  				newSnapshotAt(epsilon(-1)),
   468  				newSnapshotAt(epsilon(-1)),
   469  				newSnapshotAt(epsilon(-1)),
   470  				newSnapshotAt(epsilon(0)),
   471  				newSnapshotAt(epsilon(0)),
   472  				newSnapshotAt(epsilon(0)),
   473  				newSnapshotAt(epsilon(0)),
   474  			},
   475  		},
   476  		{
   477  			Name:     "snapshot from the future",
   478  			At:       epsilon(0),
   479  			WantFail: newSnapshotAt(epsilon(1)),
   480  		},
   481  		{
   482  			Name:     "snapshot from the long past",
   483  			At:       testStart,
   484  			WantFail: newSnapshotAt(testStart.Add(-25 * time.Hour)),
   485  		},
   486  		{
   487  			Name:         "simple metric update",
   488  			Registration: newMetricRegistration(fooInt),
   489  			WantSuccess: []*Snapshot{
   490  				newSnapshotAt(epsilon(-1)).Add(
   491  					fooInt.int(2),
   492  				),
   493  			},
   494  		},
   495  		{
   496  			Name:         "simple metric update multiple times",
   497  			Registration: newMetricRegistration(fooInt),
   498  			WantSuccess: []*Snapshot{
   499  				newSnapshotAt(epsilon(-3)).Add(fooInt.int(2)),
   500  				newSnapshotAt(epsilon(-2)).Add(fooInt.int(-1)),
   501  				newSnapshotAt(epsilon(-1)).Add(fooInt.int(4)),
   502  			},
   503  		},
   504  		{
   505  			Name:         "counter can go forwards",
   506  			Registration: newMetricRegistration(fooCounter),
   507  			WantSuccess: []*Snapshot{
   508  				newSnapshotAt(epsilon(-3)).Add(fooCounter.int(1)),
   509  				newSnapshotAt(epsilon(-2)).Add(fooCounter.int(3)),
   510  				newSnapshotAt(epsilon(-1)).Add(fooCounter.int(3)),
   511  			},
   512  		},
   513  		{
   514  			Name:         "counter cannot go backwards",
   515  			Registration: newMetricRegistration(fooCounter),
   516  			WantSuccess: []*Snapshot{
   517  				newSnapshotAt(epsilon(-3)).Add(fooCounter.int(1)),
   518  				newSnapshotAt(epsilon(-2)).Add(fooCounter.int(3)),
   519  			},
   520  			WantFail: newSnapshotAt(epsilon(-1)).Add(fooCounter.int(2)),
   521  		},
   522  		{
   523  			Name:         "counter cannot change type",
   524  			Registration: newMetricRegistration(fooCounter),
   525  			WantSuccess: []*Snapshot{
   526  				newSnapshotAt(epsilon(-3)).Add(fooCounter.int(1)),
   527  				newSnapshotAt(epsilon(-2)).Add(fooCounter.int(3)),
   528  			},
   529  			WantFail: newSnapshotAt(epsilon(-1)).Add(fooCounter.float(4)),
   530  		},
   531  		{
   532  			Name:         "update for unknown metric",
   533  			Registration: newMetricRegistration(fooInt),
   534  			WantFail:     newSnapshotAt(epsilon(-1)).Add(fooCounter.int(2)),
   535  		},
   536  		{
   537  			Name:         "update for mismatching metric definition: type",
   538  			Registration: newMetricRegistration(fooInt),
   539  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   540  				(&metricMetadata{PB: &pb.MetricMetadata{
   541  					PrometheusName: fooInt.PB.GetPrometheusName(),
   542  					Type:           pb.MetricMetadata_TYPE_DISTRIBUTION,
   543  					Description:    fooInt.PB.GetDescription(),
   544  				}}).int(2),
   545  			),
   546  		},
   547  		{
   548  			Name:         "update for mismatching metric definition: name",
   549  			Registration: newMetricRegistration(fooInt),
   550  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   551  				(&metricMetadata{PB: &pb.MetricMetadata{
   552  					PrometheusName: "not_foo_int",
   553  					Type:           fooInt.PB.GetType(),
   554  					Description:    fooInt.PB.GetDescription(),
   555  				}}).int(2),
   556  			),
   557  		},
   558  		{
   559  			Name:         "update for mismatching metric definition: description",
   560  			Registration: newMetricRegistration(fooInt),
   561  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   562  				(&metricMetadata{PB: &pb.MetricMetadata{
   563  					PrometheusName: fooInt.PB.GetPrometheusName(),
   564  					Type:           fooInt.PB.GetType(),
   565  					Description:    "not fooInt's description",
   566  				}}).int(2),
   567  			),
   568  		},
   569  		{
   570  			Name:         "update with no fields for metric with fields",
   571  			Registration: newMetricRegistration(fooInt.withField(field1)),
   572  			WantFail:     newSnapshotAt(epsilon(-1)).Add(fooInt.int(2)),
   573  		},
   574  		{
   575  			Name:         "update with fields for metric without fields",
   576  			Registration: newMetricRegistration(fooInt),
   577  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   578  				fooInt.fieldVal(field1, "val1a").int(2),
   579  			),
   580  		},
   581  		{
   582  			Name:         "update with invalid field value",
   583  			Registration: newMetricRegistration(fooInt.withField(field1)),
   584  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   585  				fooInt.fieldVal(field1, "not_val1a").int(2),
   586  			),
   587  		},
   588  		{
   589  			Name:         "update with valid field value for wrong field",
   590  			Registration: newMetricRegistration(fooInt.withField(field1)),
   591  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   592  				fooInt.fieldVal(field2, "val1a").int(2),
   593  			),
   594  		},
   595  		{
   596  			Name:         "update with valid field values provided twice",
   597  			Registration: newMetricRegistration(fooInt.withField(field1)),
   598  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   599  				fooInt.fieldVal(field1, "val1a").int(2),
   600  				fooInt.fieldVal(field1, "val1a").int(2),
   601  			),
   602  		},
   603  		{
   604  			Name:         "update with valid field value",
   605  			Registration: newMetricRegistration(fooInt.withField(field1)),
   606  			WantSuccess: []*Snapshot{
   607  				newSnapshotAt(epsilon(-1)).Add(
   608  					fooInt.fieldVal(field1, "val1a").int(7),
   609  					fooInt.fieldVal(field1, "val1b").int(2),
   610  				),
   611  			},
   612  		},
   613  		{
   614  			Name:         "update with multiple valid field value",
   615  			Registration: newMetricRegistration(fooCounter.withField(field1, field2)),
   616  			WantSuccess: []*Snapshot{
   617  				newSnapshotAt(epsilon(-1)).Add(
   618  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
   619  						field1: "val1a",
   620  						field2: "val2a",
   621  					}).int(3),
   622  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
   623  						field1: "val1b",
   624  						field2: "val2a",
   625  					}).int(2),
   626  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
   627  						field1: "val1a",
   628  						field2: "val2b",
   629  					}).int(1),
   630  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
   631  						field1: "val1b",
   632  						field2: "val2b",
   633  					}).int(4),
   634  				),
   635  			},
   636  		},
   637  		{
   638  			Name:         "update with multiple valid field values but duplicated",
   639  			Registration: newMetricRegistration(fooCounter.withField(field1, field2)),
   640  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   641  				fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
   642  					field1: "val1b",
   643  					field2: "val2b",
   644  				}).int(4),
   645  				fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
   646  					field1: "val1b",
   647  					field2: "val2b",
   648  				}).int(4),
   649  			),
   650  		},
   651  		{
   652  			Name: "update with same valid field values across two metrics",
   653  			Registration: newMetricRegistration(
   654  				fooInt.withField(field1, field2),
   655  				fooCounter.withField(field1, field2),
   656  			),
   657  			WantSuccess: []*Snapshot{
   658  				newSnapshotAt(epsilon(-1)).Add(
   659  					fooInt.fieldVals(map[*pb.MetricMetadata_Field]string{
   660  						field1: "val1a",
   661  						field2: "val2a",
   662  					}).int(3),
   663  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
   664  						field1: "val1a",
   665  						field2: "val2a",
   666  					}).int(3),
   667  				),
   668  			},
   669  		},
   670  		{
   671  			Name:         "update with multiple value types",
   672  			Registration: newMetricRegistration(fooInt),
   673  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   674  				&Data{
   675  					Metric: fooInt.metric(),
   676  					Number: &Number{Int: 2},
   677  					HistogramValue: &Histogram{
   678  						Total: Number{Int: 5},
   679  						Buckets: []Bucket{
   680  							{UpperBound: Number{Int: 0}, Samples: 1},
   681  							{UpperBound: Number{Int: 1}, Samples: 1},
   682  						},
   683  					},
   684  				},
   685  			),
   686  		},
   687  		{
   688  			Name:         "integer metric gets float value",
   689  			Registration: newMetricRegistration(fooInt),
   690  			WantFail:     newSnapshotAt(epsilon(-1)).Add(fooInt.float(2.5)),
   691  		},
   692  		{
   693  			Name:         "metric gets no value",
   694  			Registration: newMetricRegistration(fooInt),
   695  			WantFail:     newSnapshotAt(epsilon(-1)).Add(&Data{Metric: fooInt.metric()}),
   696  		},
   697  		{
   698  			Name:         "distribution gets integer value",
   699  			Registration: newMetricRegistration(fooDist),
   700  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   701  				fooDist.int(2),
   702  			),
   703  		},
   704  		{
   705  			Name:         "successful distribution",
   706  			Registration: newMetricRegistration(fooDist),
   707  			WantSuccess: []*Snapshot{
   708  				newSnapshotAt(epsilon(-1)).Add(
   709  					fooDist.dist(1, 2, 3, 4, 5, 6),
   710  				),
   711  			},
   712  		},
   713  		{
   714  			Name:         "distribution updates",
   715  			Registration: newMetricRegistration(fooDist),
   716  			WantSuccess: []*Snapshot{
   717  				newSnapshotAt(epsilon(-2)).Add(
   718  					fooDist.dist(1, 2, 3, 4, 5, 6),
   719  				),
   720  				newSnapshotAt(epsilon(-1)).Add(
   721  					fooDist.dist(0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 25),
   722  				),
   723  			},
   724  		},
   725  		{
   726  			Name:         "distribution updates with fields",
   727  			Registration: newMetricRegistration(fooDist.withField(field1)),
   728  			WantSuccess: []*Snapshot{
   729  				newSnapshotAt(epsilon(-2)).Add(
   730  					fooDist.fieldVal(field1, "val1a").dist(1, 2, 3, 4, 5, 6),
   731  				),
   732  				newSnapshotAt(epsilon(-1)).Add(
   733  					fooDist.fieldVal(field1, "val1a").dist(0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 25),
   734  				),
   735  			},
   736  		},
   737  		{
   738  			Name:         "distribution cannot have number of samples regress",
   739  			Registration: newMetricRegistration(fooDist),
   740  			WantSuccess: []*Snapshot{
   741  				newSnapshotAt(epsilon(-3)).Add(
   742  					fooDist.dist(1, 2, 3, 4, 5, 6),
   743  				),
   744  				newSnapshotAt(epsilon(-2)).Add(
   745  					fooDist.dist(0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9, 25),
   746  				),
   747  			},
   748  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   749  				fooDist.dist(0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 9),
   750  			),
   751  		},
   752  		{
   753  			Name:         "distribution sum-of-squared-deviations must be a floating-point number",
   754  			Registration: newMetricRegistration(fooDist),
   755  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   756  				&Data{
   757  					Metric: fooDist.metric(),
   758  					Labels: fooDist.labels(),
   759  					HistogramValue: &Histogram{
   760  						Buckets: fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   761  						Min:     fooDist.dist(1, 2, 3).HistogramValue.Min,
   762  						Max:     fooDist.dist(1, 2, 3).HistogramValue.Max,
   763  						SumOfSquaredDeviations: Number{
   764  							Int: int64(fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations.Float),
   765  						},
   766  					},
   767  				},
   768  			),
   769  		},
   770  		{
   771  			Name:         "distribution cannot have sum-of-squared-deviations regress",
   772  			Registration: newMetricRegistration(fooDist),
   773  			WantSuccess: []*Snapshot{
   774  				newSnapshotAt(epsilon(-2)).Add(
   775  					&Data{
   776  						Metric: fooDist.metric(),
   777  						Labels: fooDist.labels(),
   778  						HistogramValue: &Histogram{
   779  							Buckets:                fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   780  							Min:                    fooDist.dist(1, 2, 3).HistogramValue.Min,
   781  							Max:                    fooDist.dist(1, 2, 3).HistogramValue.Max,
   782  							SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   783  						},
   784  					},
   785  				),
   786  			},
   787  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   788  				&Data{
   789  					Metric: fooDist.metric(),
   790  					Labels: fooDist.labels(),
   791  					HistogramValue: &Histogram{
   792  						Buckets: fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   793  						Min:     fooDist.dist(1, 2, 3).HistogramValue.Min,
   794  						Max:     fooDist.dist(1, 2, 3).HistogramValue.Max,
   795  						SumOfSquaredDeviations: Number{
   796  							Float: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations.Float - 1.0,
   797  						},
   798  					},
   799  				},
   800  			),
   801  		},
   802  		{
   803  			Name:         "distribution cannot have minimum increase",
   804  			Registration: newMetricRegistration(fooDist),
   805  			WantSuccess: []*Snapshot{
   806  				newSnapshotAt(epsilon(-2)).Add(
   807  					&Data{
   808  						Metric: fooDist.metric(),
   809  						Labels: fooDist.labels(),
   810  						HistogramValue: &Histogram{
   811  							Buckets:                fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   812  							Min:                    fooDist.dist(1, 2, 3).HistogramValue.Min,
   813  							Max:                    fooDist.dist(1, 2, 3).HistogramValue.Max,
   814  							SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   815  						},
   816  					},
   817  				),
   818  			},
   819  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   820  				&Data{
   821  					Metric: fooDist.metric(),
   822  					Labels: fooDist.labels(),
   823  					HistogramValue: &Histogram{
   824  						Buckets: fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   825  						Min: Number{
   826  							Int: fooDist.dist(1, 2, 3).HistogramValue.Min.Int + 1,
   827  						},
   828  						Max:                    fooDist.dist(1, 2, 3).HistogramValue.Max,
   829  						SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   830  					},
   831  				},
   832  			),
   833  		},
   834  		{
   835  			Name:         "distribution cannot have minimum value change type",
   836  			Registration: newMetricRegistration(fooDist),
   837  			WantSuccess: []*Snapshot{
   838  				newSnapshotAt(epsilon(-2)).Add(
   839  					&Data{
   840  						Metric: fooDist.metric(),
   841  						Labels: fooDist.labels(),
   842  						HistogramValue: &Histogram{
   843  							Buckets: fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   844  							Min: Number{
   845  								Int: fooDist.dist(1, 2, 3).HistogramValue.Min.Int,
   846  							},
   847  							Max:                    fooDist.dist(1, 2, 3).HistogramValue.Max,
   848  							SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   849  						},
   850  					},
   851  				),
   852  			},
   853  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   854  				&Data{
   855  					Metric: fooDist.metric(),
   856  					Labels: fooDist.labels(),
   857  					HistogramValue: &Histogram{
   858  						Buckets: fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   859  						Min: Number{
   860  							Float: float64(fooDist.dist(1, 2, 3).HistogramValue.Min.Int),
   861  						},
   862  						Max:                    fooDist.dist(1, 2, 3).HistogramValue.Max,
   863  						SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   864  					},
   865  				},
   866  			),
   867  		},
   868  		{
   869  			Name:         "distribution cannot have maximum decrease",
   870  			Registration: newMetricRegistration(fooDist),
   871  			WantSuccess: []*Snapshot{
   872  				newSnapshotAt(epsilon(-2)).Add(
   873  					&Data{
   874  						Metric: fooDist.metric(),
   875  						Labels: fooDist.labels(),
   876  						HistogramValue: &Histogram{
   877  							Buckets:                fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   878  							Min:                    fooDist.dist(1, 2, 3).HistogramValue.Min,
   879  							Max:                    fooDist.dist(1, 2, 3).HistogramValue.Max,
   880  							SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   881  						},
   882  					},
   883  				),
   884  			},
   885  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   886  				&Data{
   887  					Metric: fooDist.metric(),
   888  					Labels: fooDist.labels(),
   889  					HistogramValue: &Histogram{
   890  						Buckets: fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   891  						Min:     fooDist.dist(1, 2, 3).HistogramValue.Min,
   892  						Max: Number{
   893  							Int: fooDist.dist(1, 2, 3).HistogramValue.Max.Int - 1,
   894  						},
   895  						SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   896  					},
   897  				},
   898  			),
   899  		},
   900  		{
   901  			Name:         "distribution cannot have maximum value change type",
   902  			Registration: newMetricRegistration(fooDist),
   903  			WantSuccess: []*Snapshot{
   904  				newSnapshotAt(epsilon(-2)).Add(
   905  					&Data{
   906  						Metric: fooDist.metric(),
   907  						Labels: fooDist.labels(),
   908  						HistogramValue: &Histogram{
   909  							Buckets: fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   910  							Min:     fooDist.dist(1, 2, 3).HistogramValue.Min,
   911  							Max: Number{
   912  								Int: fooDist.dist(1, 2, 3).HistogramValue.Max.Int,
   913  							},
   914  							SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   915  						},
   916  					},
   917  				),
   918  			},
   919  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   920  				&Data{
   921  					Metric: fooDist.metric(),
   922  					Labels: fooDist.labels(),
   923  					HistogramValue: &Histogram{
   924  						Buckets: fooDist.dist(1, 2, 3).HistogramValue.Buckets,
   925  						Min:     fooDist.dist(1, 2, 3).HistogramValue.Min,
   926  						Max: Number{
   927  							Float: float64(fooDist.dist(1, 2, 3).HistogramValue.Max.Int),
   928  						},
   929  						SumOfSquaredDeviations: fooDist.dist(1, 2, 3).HistogramValue.SumOfSquaredDeviations,
   930  					},
   931  				},
   932  			),
   933  		},
   934  		{
   935  			Name:         "distribution with zero samples",
   936  			Registration: newMetricRegistration(fooDist),
   937  			WantSuccess: []*Snapshot{newSnapshotAt(epsilon(-1)).Add(
   938  				&Data{
   939  					Metric: fooDist.metric(),
   940  					HistogramValue: &Histogram{
   941  						Buckets: []Bucket{
   942  							{UpperBound: Number{Int: 0}, Samples: 0},
   943  							{UpperBound: Number{Int: 1}, Samples: 0},
   944  							{UpperBound: Number{Int: 2}, Samples: 0},
   945  							{UpperBound: Number{Int: 4}, Samples: 0},
   946  							{UpperBound: Number{Int: 8}, Samples: 0},
   947  							{UpperBound: Number{Float: math.Inf(1)}, Samples: 0},
   948  						},
   949  					},
   950  				},
   951  			)},
   952  		},
   953  		{
   954  			Name:         "distribution with manual samples",
   955  			Registration: newMetricRegistration(fooDist),
   956  			WantSuccess: []*Snapshot{newSnapshotAt(epsilon(-1)).Add(
   957  				&Data{
   958  					Metric: fooDist.metric(),
   959  					HistogramValue: &Histogram{
   960  						Total: Number{Int: 10},
   961  						Buckets: []Bucket{
   962  							{UpperBound: Number{Int: 0}, Samples: 2},
   963  							{UpperBound: Number{Int: 1}, Samples: 1},
   964  							{UpperBound: Number{Int: 2}, Samples: 3},
   965  							{UpperBound: Number{Int: 4}, Samples: 1},
   966  							{UpperBound: Number{Int: 8}, Samples: 4},
   967  							{UpperBound: Number{Float: math.Inf(1)}, Samples: 1},
   968  						},
   969  					},
   970  				},
   971  			)},
   972  		},
   973  		{
   974  			Name:         "distribution gets bad number of buckets",
   975  			Registration: newMetricRegistration(fooDist),
   976  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   977  				&Data{
   978  					Metric: fooDist.metric(),
   979  					HistogramValue: &Histogram{
   980  						Total: Number{Int: 10},
   981  						Buckets: []Bucket{
   982  							{UpperBound: Number{Int: 0}, Samples: 2},
   983  							{UpperBound: Number{Int: 1}, Samples: 1},
   984  							{UpperBound: Number{Int: 2}, Samples: 3},
   985  							// Missing: {UpperBound: Number{Int: 4}, Samples: 1},
   986  							{UpperBound: Number{Int: 8}, Samples: 4},
   987  							{UpperBound: Number{Float: math.Inf(1)}, Samples: 1},
   988  						},
   989  					},
   990  				},
   991  			),
   992  		},
   993  		{
   994  			Name:         "distribution gets unexpected bucket boundary",
   995  			Registration: newMetricRegistration(fooDist),
   996  			WantFail: newSnapshotAt(epsilon(-1)).Add(
   997  				&Data{
   998  					Metric: fooDist.metric(),
   999  					HistogramValue: &Histogram{
  1000  						Total: Number{Int: 10},
  1001  						Buckets: []Bucket{
  1002  							{UpperBound: Number{Int: 0}, Samples: 2},
  1003  							{UpperBound: Number{Int: 1}, Samples: 1},
  1004  							{UpperBound: Number{Int: 3 /* Should be 2 */}, Samples: 3},
  1005  							{UpperBound: Number{Int: 4}, Samples: 1},
  1006  							{UpperBound: Number{Int: 8}, Samples: 4},
  1007  							{UpperBound: Number{Float: math.Inf(1)}, Samples: 1},
  1008  						},
  1009  					},
  1010  				},
  1011  			),
  1012  		},
  1013  		{
  1014  			Name:         "distribution gets unexpected last bucket boundary",
  1015  			Registration: newMetricRegistration(fooDist),
  1016  			WantFail: newSnapshotAt(epsilon(-1)).Add(
  1017  				&Data{
  1018  					Metric: fooDist.metric(),
  1019  					HistogramValue: &Histogram{
  1020  						Total: Number{Int: 10},
  1021  						Buckets: []Bucket{
  1022  							{UpperBound: Number{Int: 0}, Samples: 2},
  1023  							{UpperBound: Number{Int: 1}, Samples: 1},
  1024  							{UpperBound: Number{Int: 2}, Samples: 3},
  1025  							{UpperBound: Number{Int: 4}, Samples: 1},
  1026  							{UpperBound: Number{Int: 8}, Samples: 4},
  1027  							{
  1028  								UpperBound: Number{Float: math.Inf(-1) /* Should be +inf */},
  1029  								Samples:    1,
  1030  							},
  1031  						},
  1032  					},
  1033  				},
  1034  			),
  1035  		},
  1036  		{
  1037  			Name:         "partial incremental snapshot needing indirection",
  1038  			Registration: newMetricRegistration(fooCounter),
  1039  			WantSuccess: []*Snapshot{
  1040  				newSnapshotAt(epsilon(-2)).Add(fooCounter.int(int64(maxDirectUint + 2))),
  1041  				newSnapshotAt(epsilon(-1)).Add(),
  1042  				newSnapshotAt(epsilon(0)).Add(fooCounter.int(int64(maxDirectUint + 3))),
  1043  			},
  1044  		},
  1045  		{
  1046  			Name: "worked example",
  1047  			Registration: newMetricRegistration(
  1048  				fooInt,
  1049  				fooDist.withField(field1),
  1050  				fooCounter.withField(field1, field2),
  1051  			),
  1052  			WantSuccess: []*Snapshot{
  1053  				// Empty snapshot.
  1054  				newSnapshotAt(epsilon(-6)),
  1055  				// Simple snapshot.
  1056  				newSnapshotAt(epsilon(-5)).Add(
  1057  					fooInt.int(3),
  1058  					fooDist.fieldVal(field1, "val1a").dist(1, 2, 3, 4, 5, 6),
  1059  					fooDist.fieldVal(field1, "val1b").dist(-1, -8, 100),
  1060  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
  1061  						field1: "val1a",
  1062  						field2: "val2a",
  1063  					}).int(6),
  1064  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
  1065  						field1: "val1b",
  1066  						field2: "val2a",
  1067  					}).int(3),
  1068  				),
  1069  				// And another.
  1070  				newSnapshotAt(epsilon(-4)).Add(
  1071  					fooInt.int(1),
  1072  					fooDist.fieldVal(field1, "val1a").dist(1, 2, 3, 4, 5, 6, 7),
  1073  					fooDist.fieldVal(field1, "val1b").dist(-1, -8, 100, 42),
  1074  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
  1075  						field1: "val1a",
  1076  						field2: "val2a",
  1077  					}).int(6),
  1078  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
  1079  						field1: "val1b",
  1080  						field2: "val2a",
  1081  					}).int(4),
  1082  				),
  1083  				// And another one, partial this time.
  1084  				newSnapshotAt(epsilon(-3)).Add(
  1085  					fooDist.fieldVal(field1, "val1b").dist(-1, -8, 100, 42, 1337),
  1086  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
  1087  						field1: "val1a",
  1088  						field2: "val2a",
  1089  					}).int(6),
  1090  				),
  1091  				// An empty one.
  1092  				newSnapshotAt(epsilon(-2)),
  1093  				// Another empty one at the same timestamp.
  1094  				newSnapshotAt(epsilon(-1)),
  1095  				// Another full one which doesn't change any value.
  1096  				newSnapshotAt(epsilon(0)).Add(
  1097  					fooInt.int(1),
  1098  					fooDist.fieldVal(field1, "val1a").dist(1, 2, 3, 4, 5, 6, 7),
  1099  					fooDist.fieldVal(field1, "val1b").dist(-1, -8, 100, 42, 1337),
  1100  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
  1101  						field1: "val1a",
  1102  						field2: "val2a",
  1103  					}).int(6),
  1104  					fooCounter.fieldVals(map[*pb.MetricMetadata_Field]string{
  1105  						field1: "val1b",
  1106  						field2: "val2a",
  1107  					}).int(4),
  1108  				),
  1109  			},
  1110  		},
  1111  	} {
  1112  		t.Run(test.Name, func(t *testing.T) {
  1113  			testTime := test.At
  1114  			if testTime.IsZero() {
  1115  				testTime = testStart
  1116  			}
  1117  			at(testTime, func() {
  1118  				t.Logf("Test is running with simulated time: %v", testTime)
  1119  				verifier, cleanup, err := NewVerifier(test.Registration)
  1120  				defer cleanup()
  1121  				if err != nil && !test.WantVerifierCreationErr {
  1122  					t.Fatalf("unexpected verifier creation error: %v", err)
  1123  				}
  1124  				if err == nil && test.WantVerifierCreationErr {
  1125  					t.Fatal("verifier creation unexpectedly succeeded")
  1126  				}
  1127  				if err != nil {
  1128  					t.Logf("Verifier creation failed (as expected by this test): %v", err)
  1129  					return
  1130  				}
  1131  
  1132  				if len(test.WantSuccess) == 0 && test.WantFail == nil {
  1133  					if err = verifier.Verify(NewSnapshot()); err != nil {
  1134  						t.Errorf("empty snapshot failed verification: %v", err)
  1135  					}
  1136  				} else {
  1137  					for i, snapshot := range test.WantSuccess {
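         						// Each verification runs in its own closure so that a panic inside
         						// Verify is recovered and reported as a test failure rather than
         						// crashing the test binary; the WantFail check below uses the same
         						// pattern.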
  1138  						func() {
  1139  							defer func() {
  1140  								panicErr := recover()
  1141  								t.Helper()
  1142  								if panicErr != nil {
  1143  									t.Fatalf("panic during verification of WantSuccess[%d] snapshot: %v", i, panicErr)
  1144  								}
  1145  							}()
  1146  							if err = verifier.Verify(snapshot); err != nil {
  1147  								t.Fatalf("snapshot WantSuccess[%d] failed verification: %v", i, err)
  1148  							}
  1149  						}()
  1150  					}
  1151  					if test.WantFail != nil {
  1152  						func() {
  1153  							defer func() {
  1154  								panicErr := recover()
  1155  								t.Helper()
  1156  								if panicErr != nil {
  1157  									t.Fatalf("panic during verification of WantFail snapshot: %v", panicErr)
  1158  								}
  1159  							}()
  1160  							if err = verifier.Verify(test.WantFail); err == nil {
  1161  								t.Error("WantFail snapshot unexpectedly succeeded verification")
  1162  							} else {
  1163  								t.Logf("WantFail snapshot failed verification (as expected by this test): %v", err)
  1164  							}
  1165  						}()
  1166  					}
  1167  				}
  1168  			})
  1169  		})
  1170  	}
  1171  }
  1172  
  1173  // shortWriter implements io.StringWriter but fails after a given number of bytes.
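         // For example, a shortWriter Reset to a size of 4 accepts WriteString("abc")
         // in full, then returns a "short write" error for a subsequent
         // WriteString("defg"), since only one more byte fits.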
  1174  type shortWriter struct {
  1175  	buf     strings.Builder
  1176  	size    int
  1177  	maxSize int
  1178  }
  1179  
  1180  // Reset erases buffer data and resets the shortWriter to the given size.
  1181  func (s *shortWriter) Reset(size int) {
  1182  	s.buf.Reset()
  1183  	s.size = 0
  1184  	s.maxSize = size
  1185  }
  1186  
  1187  // String returns the buffered data as a string.
  1188  func (s *shortWriter) String() string {
  1189  	return s.buf.String()
  1190  }
  1191  
   1192  // WriteString implements io.StringWriter.
  1193  func (s *shortWriter) WriteString(x string) (n int, err error) {
  1194  	toWrite := len(x)
  1195  	leftToWrite := s.maxSize - s.size
  1196  	if leftToWrite < toWrite {
  1197  		toWrite = leftToWrite
  1198  	}
  1199  	if toWrite == 0 {
  1200  		return 0, errors.New("writer out of capacity")
  1201  	}
  1202  	written, err := s.buf.WriteString(x[:toWrite])
  1203  	s.size += written
  1204  	if written == len(x) {
  1205  		return written, err
  1206  	}
  1207  	return written, errors.New("short write")
  1208  }
  1209  
  1210  // reflectProto converts a v1 or v2 proto message to a proto message with
  1211  // reflection enabled.
  1212  func reflectProto(m any) protoreflect.ProtoMessage {
  1213  	if msg, hasReflection := m.(proto.Message); hasReflection {
  1214  		return msg
  1215  	}
  1216  	// Convert v1 proto to introspectable view, if possible and necessary.
  1217  	if v1pb, ok := m.(v1proto.Message); ok {
  1218  		return v1proto.MessageReflect(v1pb).Interface()
  1219  	}
  1220  	panic(fmt.Sprintf("Proto message %v isn't of a supported protobuf type", m))
  1221  }
  1222  
  1223  // TestSnapshotToPrometheus verifies that the contents of a Snapshot can be
  1224  // converted into text that can be parsed by the Prometheus parsing libraries,
  1225  // and produces the data we expect them to.
  1226  func TestSnapshotToPrometheus(t *testing.T) {
  1227  	singleLineFormatter := &prototext.MarshalOptions{Multiline: false, EmitUnknown: true}
  1228  	multiLineFormatter := &prototext.MarshalOptions{Multiline: true, Indent: "  ", EmitUnknown: true}
  1229  	testStart := time.Now()
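         	// Pinning every snapshot to testStart keeps the "{TIMESTAMP}"
         	// substitution in the expected data deterministic.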
  1230  	newSnapshot := func() *Snapshot {
  1231  		return newSnapshotAt(testStart)
  1232  	}
  1233  	for _, test := range []struct {
  1234  		Name string
  1235  
  1236  		// Snapshot will be rendered as Prometheus and compared against WantData.
  1237  		Snapshot *Snapshot
  1238  
  1239  		// ExportOptions dictates the options used during overall rendering.
  1240  		ExportOptions ExportOptions
  1241  
  1242  		// SnapshotExportOptions dictates the options used during Snapshot rendering.
  1243  		SnapshotExportOptions SnapshotExportOptions
  1244  
  1245  		// WantFail, if true, indicates that the test is expected to fail when
  1246  		// rendering or parsing the snapshot data.
  1247  		WantFail bool
  1248  
  1249  		// WantData is Prometheus text format that matches the data in Snapshot.
  1250  		// The substring "{TIMESTAMP}" will be replaced with the value of
  1251  		// `testStart` in milliseconds.
  1252  		WantData string
  1253  	}{
  1254  		{
  1255  			Name:     "empty snapshot",
  1256  			Snapshot: newSnapshot(),
  1257  		},
  1258  		{
  1259  			Name:     "simple integer",
  1260  			Snapshot: newSnapshot().Add(fooInt.int(3)),
  1261  			WantData: `
  1262  				# HELP foo_int An integer about foo
  1263  				# TYPE foo_int gauge
  1264  				foo_int 3 {TIMESTAMP}
  1265  			`,
  1266  		},
  1267  		{
  1268  			Name:     "simple float",
  1269  			Snapshot: newSnapshot().Add(fooInt.float(2.5)),
  1270  			WantData: `
  1271  				# HELP foo_int An integer about foo
  1272  				# TYPE foo_int gauge
  1273  				foo_int 2.5 {TIMESTAMP}
  1274  			`,
  1275  		},
  1276  		{
  1277  			Name:     "simple counter",
  1278  			Snapshot: newSnapshot().Add(fooCounter.int(4)),
  1279  			WantData: `
  1280  				# HELP foo_counter A counter of foos
  1281  				# TYPE foo_counter counter
  1282  				foo_counter 4 {TIMESTAMP}
  1283  			`,
  1284  		},
  1285  		{
  1286  			Name: "two metrics",
  1287  			Snapshot: newSnapshot().Add(
  1288  				// Note the different order here than in WantData,
  1289  				// to test ordering independence.
  1290  				fooCounter.int(4),
  1291  				fooInt.int(3),
  1292  			),
  1293  			WantData: `
  1294  				# HELP foo_int An integer about foo
  1295  				# TYPE foo_int gauge
  1296  				foo_int 3 {TIMESTAMP}
  1297  				# HELP foo_counter A counter of foos
  1298  				# TYPE foo_counter counter
  1299  				foo_counter 4 {TIMESTAMP}
  1300  			`,
  1301  		},
  1302  		{
  1303  			Name: "metric with 1 field",
  1304  			Snapshot: newSnapshot().Add(
  1305  				fooInt.fieldVal(field1, "val1a").int(3),
  1306  				fooInt.fieldVal(field1, "val1b").int(7),
  1307  			),
  1308  			WantData: `
  1309  				# HELP foo_int An integer about foo
  1310  				# TYPE foo_int gauge
  1311  				foo_int{field1="val1a"} 3 {TIMESTAMP}
  1312  				foo_int{field1="val1b"} 7 {TIMESTAMP}
  1313  			`,
  1314  		},
  1315  		{
  1316  			Name: "metric with 2 fields",
  1317  			Snapshot: newSnapshot().Add(
  1318  				fooInt.fieldVal(field1, "val1a").fieldVal(field2, "val2a").int(3),
  1319  				fooInt.fieldVal(field2, "val2b").fieldVal(field1, "val1b").int(7),
  1320  			),
  1321  			WantData: `
  1322  				# HELP foo_int An integer about foo
  1323  				# TYPE foo_int gauge
  1324  				foo_int{field1="val1a",field2="val2a"} 3 {TIMESTAMP}
  1325  				foo_int{field1="val1b",field2="val2b"} 7 {TIMESTAMP}
  1326  			`,
  1327  		},
  1328  		{
  1329  			Name:     "simple integer with export options",
  1330  			Snapshot: newSnapshot().Add(fooInt.int(3)),
  1331  			ExportOptions: ExportOptions{
  1332  				CommentHeader: "Some header",
  1333  			},
  1334  			SnapshotExportOptions: SnapshotExportOptions{
  1335  				ExporterPrefix: "some_prefix_",
  1336  				ExtraLabels: map[string]string{
  1337  					"field3": "val3a",
  1338  				},
  1339  			},
  1340  			WantData: `
  1341  				# HELP some_prefix_foo_int An integer about foo
  1342  				# TYPE some_prefix_foo_int gauge
  1343  				some_prefix_foo_int{field3="val3a"} 3 {TIMESTAMP}
  1344  			`,
  1345  		},
  1346  		{
  1347  			Name: "integer with fields mixing with export options",
  1348  			Snapshot: newSnapshot().Add(
  1349  				fooInt.fieldVal(field1, "val1a").fieldVal(field2, "val2a").int(3),
  1350  				fooInt.fieldVal(field2, "val2b").fieldVal(field1, "val1b").int(7),
  1351  			),
  1352  			SnapshotExportOptions: SnapshotExportOptions{
  1353  				ExtraLabels: map[string]string{
  1354  					"field3": "val3a",
  1355  				},
  1356  			},
  1357  			WantData: `
  1358  				# HELP foo_int An integer about foo
  1359  				# TYPE foo_int gauge
  1360  				foo_int{field1="val1a",field2="val2a",field3="val3a"} 3 {TIMESTAMP}
  1361  				foo_int{field1="val1b",field2="val2b",field3="val3a"} 7 {TIMESTAMP}
  1362  			`,
  1363  		},
  1364  		{
  1365  			Name: "integer with fields conflicting with export options",
  1366  			Snapshot: newSnapshot().Add(
  1367  				fooInt.fieldVal(field1, "val1a").fieldVal(field2, "val2a").int(3),
  1368  				fooInt.fieldVal(field2, "val2b").fieldVal(field1, "val1b").int(7),
  1369  			),
  1370  			SnapshotExportOptions: SnapshotExportOptions{
  1371  				ExtraLabels: map[string]string{
  1372  					"field2": "val2c",
  1373  					"field3": "val3a",
  1374  				},
  1375  			},
  1376  			WantFail: true,
  1377  		},
  1378  		{
  1379  			Name: "simple distribution",
  1380  			Snapshot: newSnapshot().Add(
  1381  				// -1 + 3 + 3 + 3 + 5 + 7 + 7 + 99 = 126
  1382  				fooDist.dist(-1, 3, 3, 3, 5, 7, 7, 99),
  1383  			),
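         			// Bucket sample counts in the Prometheus text format are cumulative
         			// per "le" upper bound.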
  1384  			WantData: `
  1385  				# HELP foo_dist A distribution about foo
  1386  				# TYPE foo_dist histogram
  1387  				foo_dist_bucket{le="0"} 1 {TIMESTAMP}
  1388  				foo_dist_bucket{le="1"} 1 {TIMESTAMP}
  1389  				foo_dist_bucket{le="2"} 1 {TIMESTAMP}
  1390  				foo_dist_bucket{le="4"} 4 {TIMESTAMP}
  1391  				foo_dist_bucket{le="8"} 7 {TIMESTAMP}
  1392  				foo_dist_bucket{le="+inf"} 8 {TIMESTAMP}
  1393  				foo_dist_sum 126 {TIMESTAMP}
  1394  				foo_dist_count 8 {TIMESTAMP}
  1395  				foo_dist_min -1 {TIMESTAMP}
  1396  				foo_dist_max 99 {TIMESTAMP}
  1397  				foo_dist_ssd 8187.5 {TIMESTAMP}
  1398  			`,
  1399  		},
  1400  		{
  1401  			Name: "distribution with 'le' label",
  1402  			Snapshot: newSnapshot().Add(
  1403  				fooDist.fieldVal(&pb.MetricMetadata_Field{
  1404  					FieldName:     "le",
  1405  					AllowedValues: []string{"foo"},
  1406  				}, "foo").dist(-1, 3, 3, 3, 5, 7, 7, 99),
  1407  			),
  1408  			WantFail: true,
  1409  		},
  1410  		{
  1411  			Name: "distribution with no samples",
  1412  			Snapshot: newSnapshot().Add(
  1413  				fooDist.dist(),
  1414  			),
  1415  			WantData: `
  1416  				# HELP foo_dist A distribution about foo
  1417  				# TYPE foo_dist histogram
  1418  				foo_dist_bucket{le="0"} 0 {TIMESTAMP}
  1419  				foo_dist_bucket{le="1"} 0 {TIMESTAMP}
  1420  				foo_dist_bucket{le="2"} 0 {TIMESTAMP}
  1421  				foo_dist_bucket{le="4"} 0 {TIMESTAMP}
  1422  				foo_dist_bucket{le="8"} 0 {TIMESTAMP}
  1423  				foo_dist_bucket{le="+inf"} 0 {TIMESTAMP}
  1424  				foo_dist_sum 0 {TIMESTAMP}
  1425  				foo_dist_count 0 {TIMESTAMP}
  1426  				foo_dist_min 0 {TIMESTAMP}
  1427  				foo_dist_max 0 {TIMESTAMP}
  1428  				foo_dist_ssd 0 {TIMESTAMP}
  1429  			`,
  1430  		},
  1431  		{
  1432  			Name: "distribution with 1 field",
  1433  			Snapshot: newSnapshot().Add(
  1434  				// -1 + 3 + 3 + 3 + 5 + 7 + 7 + 99 = 126
  1435  				fooDist.fieldVal(field1, "val1a").dist(-1, 3, 3, 3, 5, 7, 7, 99),
  1436  				// 3 + 5 + 3 = 11
  1437  				fooDist.fieldVal(field1, "val1b").dist(3, 5, 3),
  1438  			),
  1439  			WantData: `
  1440  				# HELP foo_dist A distribution about foo
  1441  				# TYPE foo_dist histogram
  1442  				foo_dist_bucket{field1="val1a",le="0"} 1 {TIMESTAMP}
  1443  				foo_dist_bucket{field1="val1a",le="1"} 1 {TIMESTAMP}
  1444  				foo_dist_bucket{field1="val1a",le="2"} 1 {TIMESTAMP}
  1445  				foo_dist_bucket{field1="val1a",le="4"} 4 {TIMESTAMP}
  1446  				foo_dist_bucket{field1="val1a",le="8"} 7 {TIMESTAMP}
  1447  				foo_dist_bucket{field1="val1a",le="+inf"} 8 {TIMESTAMP}
  1448  				foo_dist_sum{field1="val1a"} 126 {TIMESTAMP}
  1449  				foo_dist_count{field1="val1a"} 8 {TIMESTAMP}
  1450  				foo_dist_min{field1="val1a"} -1 {TIMESTAMP}
  1451  				foo_dist_max{field1="val1a"} 99 {TIMESTAMP}
  1452  				foo_dist_ssd{field1="val1a"} 8187.5 {TIMESTAMP}
  1453  				foo_dist_bucket{field1="val1b",le="0"} 0 {TIMESTAMP}
  1454  				foo_dist_bucket{field1="val1b",le="1"} 0 {TIMESTAMP}
  1455  				foo_dist_bucket{field1="val1b",le="2"} 0 {TIMESTAMP}
  1456  				foo_dist_bucket{field1="val1b",le="4"} 2 {TIMESTAMP}
  1457  				foo_dist_bucket{field1="val1b",le="8"} 3 {TIMESTAMP}
  1458  				foo_dist_bucket{field1="val1b",le="+inf"} 3 {TIMESTAMP}
  1459  				foo_dist_sum{field1="val1b"} 11 {TIMESTAMP}
  1460  				foo_dist_count{field1="val1b"} 3 {TIMESTAMP}
  1461  				foo_dist_min{field1="val1b"} 3 {TIMESTAMP}
  1462  				foo_dist_max{field1="val1b"} 5 {TIMESTAMP}
  1463  				foo_dist_ssd{field1="val1b"} 8.25 {TIMESTAMP}
  1464  			`,
  1465  		},
  1466  		{
  1467  			Name: "distribution with 2 fields, one from ExportOptions",
  1468  			Snapshot: newSnapshot().Add(
  1469  				// -1 + 3 + 3 + 3 + 5 + 7 + 7 + 99 = 126
  1470  				fooDist.fieldVal(field1, "val1a").dist(-1, 3, 3, 3, 5, 7, 7, 99),
  1471  				// 3 + 5 + 3 = 11
  1472  				fooDist.fieldVal(field1, "val1b").dist(3, 5, 3),
  1473  			),
  1474  			ExportOptions: ExportOptions{
  1475  				CommentHeader: "Some header",
  1476  			},
  1477  			SnapshotExportOptions: SnapshotExportOptions{
  1478  				ExporterPrefix: "some_prefix_",
  1479  				ExtraLabels:    map[string]string{"field2": "val2a"},
  1480  			},
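        			// ExporterPrefix is prepended to the metric name, and the extra "field2" label is
        			// merged into every sample's label set alongside the per-metric "field1" field.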
  1481  			WantData: `
  1482  				# HELP some_prefix_foo_dist A distribution about foo
  1483  				# TYPE some_prefix_foo_dist histogram
  1484  				some_prefix_foo_dist_bucket{field1="val1a",field2="val2a",le="0"} 1 {TIMESTAMP}
  1485  				some_prefix_foo_dist_bucket{field1="val1a",field2="val2a",le="1"} 1 {TIMESTAMP}
  1486  				some_prefix_foo_dist_bucket{field1="val1a",field2="val2a",le="2"} 1 {TIMESTAMP}
  1487  				some_prefix_foo_dist_bucket{field1="val1a",field2="val2a",le="4"} 4 {TIMESTAMP}
  1488  				some_prefix_foo_dist_bucket{field1="val1a",field2="val2a",le="8"} 7 {TIMESTAMP}
  1489  				some_prefix_foo_dist_bucket{field1="val1a",field2="val2a",le="+inf"} 8 {TIMESTAMP}
  1490  				some_prefix_foo_dist_sum{field1="val1a",field2="val2a"} 126 {TIMESTAMP}
  1491  				some_prefix_foo_dist_count{field1="val1a",field2="val2a"} 8 {TIMESTAMP}
  1492  				some_prefix_foo_dist_min{field1="val1a",field2="val2a"} -1 {TIMESTAMP}
  1493  				some_prefix_foo_dist_max{field1="val1a",field2="val2a"} 99 {TIMESTAMP}
  1494  				some_prefix_foo_dist_ssd{field1="val1a",field2="val2a"} 8187.5 {TIMESTAMP}
  1495  				some_prefix_foo_dist_bucket{field1="val1b",field2="val2a",le="0"} 0 {TIMESTAMP}
  1496  				some_prefix_foo_dist_bucket{field1="val1b",field2="val2a",le="1"} 0 {TIMESTAMP}
  1497  				some_prefix_foo_dist_bucket{field1="val1b",field2="val2a",le="2"} 0 {TIMESTAMP}
  1498  				some_prefix_foo_dist_bucket{field1="val1b",field2="val2a",le="4"} 2 {TIMESTAMP}
  1499  				some_prefix_foo_dist_bucket{field1="val1b",field2="val2a",le="8"} 3 {TIMESTAMP}
  1500  				some_prefix_foo_dist_bucket{field1="val1b",field2="val2a",le="+inf"} 3 {TIMESTAMP}
  1501  				some_prefix_foo_dist_sum{field1="val1b",field2="val2a"} 11 {TIMESTAMP}
  1502  				some_prefix_foo_dist_count{field1="val1b",field2="val2a"} 3 {TIMESTAMP}
  1503  				some_prefix_foo_dist_min{field1="val1b",field2="val2a"} 3 {TIMESTAMP}
  1504  				some_prefix_foo_dist_max{field1="val1b",field2="val2a"} 5 {TIMESTAMP}
  1505  				some_prefix_foo_dist_ssd{field1="val1b",field2="val2a"} 8.25 {TIMESTAMP}
  1506  			`,
  1507  		},
  1508  	} {
  1509  		t.Run(test.Name, func(t *testing.T) {
  1510  			// Render and parse snapshot data.
  1511  			var buf bytes.Buffer
  1512  			snapshotToOptions := map[*Snapshot]SnapshotExportOptions{test.Snapshot: test.SnapshotExportOptions}
  1513  			if _, err := Write(&buf, test.ExportOptions, snapshotToOptions); err != nil {
  1514  				if test.WantFail {
  1515  					return
  1516  				}
  1517  				t.Fatalf("cannot write snapshot: %v", err)
  1518  			}
  1519  			gotMetricsRaw := buf.String()
  1520  			gotMetrics, err := (&expfmt.TextParser{}).TextToMetricFamilies(&buf)
  1521  			if err != nil {
  1522  				if test.WantFail {
  1523  					return
  1524  				}
  1525  				t.Fatalf("cannot parse data written from snapshot: %v", err)
  1526  			}
  1527  			if test.WantFail {
  1528  				t.Fatalf("Test unexpectedly succeeded in rendering and parsing snapshot data")
  1529  			}
  1530  
  1531  			// Verify that the data is consistent (i.e. verify that it's not based on random map ordering)
  1532  			var buf2 bytes.Buffer
  1533  			if _, err := Write(&buf2, test.ExportOptions, snapshotToOptions); err != nil {
  1534  				if test.WantFail {
  1535  					return
  1536  				}
  1537  				t.Fatalf("cannot write snapshot: %v", err)
  1538  			}
  1539  			gotMetricsRaw2 := buf2.String()
  1540  			if gotMetricsRaw != gotMetricsRaw2 {
  1541  				t.Errorf("inconsistent snapshot rendering:\n\n%s\n\n---- VS ----\n\n%s\n\n", gotMetricsRaw, gotMetricsRaw2)
  1542  			}
  1543  
  1544  			// Verify that error propagation works by having the writer fail at each possible spot.
  1545  			// This exercises all the write error propagation branches.
  1546  			var shortWriter shortWriter
  1547  			for writeLength := 0; writeLength < len(gotMetricsRaw); writeLength++ {
  1548  				shortWriter.Reset(writeLength)
  1549  				if _, err := Write(&shortWriter, test.ExportOptions, snapshotToOptions); err == nil {
  1550  					t.Fatalf("snapshot data was unexpectedly written in full to short writer (length %d): %v", writeLength, shortWriter.String())
  1551  				}
  1552  				if shortWriter.size != writeLength {
  1553  					t.Fatalf("Short writer should have allowed %d bytes of snapshot data to be written, but %d bytes were actually written", writeLength, shortWriter.size)
  1554  				}
  1555  			}
  1556  
  1557  			// Parse reference data.
  1558  			wantData := strings.ReplaceAll(test.WantData, "{TIMESTAMP}", fmt.Sprintf("%d", testStart.UnixMilli()))
  1559  			wantMetrics, err := (&expfmt.TextParser{}).TextToMetricFamilies(strings.NewReader(wantData))
  1560  			if err != nil {
  1561  				t.Fatalf("cannot parse reference data: %v", err)
  1562  			}
  1563  
  1564  			if len(test.Snapshot.Data) != 0 {
  1565  				// If the snapshot isn't empty, verify that both `want` and `got` contain data.
  1566  				// Otherwise, this whole test could accidentally succeed by having all attempts
  1567  				// at parsing the data result in an empty set.
  1568  				if len(wantMetrics) == 0 {
  1569  					t.Error("Snapshot is not empty, but parsing the reference data resulted in no data being produced")
  1570  				}
  1571  				if len(gotMetrics) == 0 {
  1572  					t.Error("Snapshot is not empty, but parsing the rendered snapshot resulted in no data being produced")
  1573  				}
  1574  			}
  1575  
  1576  			// Verify that all of `wantMetrics` is in `gotMetrics`.
  1577  			for metric, want := range wantMetrics {
  1578  				if _, found := gotMetrics[metric]; !found {
  1579  					wantText, err := singleLineFormatter.Marshal(reflectProto(want))
  1580  					if err != nil {
  1581  						t.Fatalf("cannot marshal reference data: %v", err)
  1582  					}
  1583  					t.Errorf("metric %s is in reference data (%v) but not present in snapshot data", metric, string(wantText))
  1584  				}
  1585  			}
  1586  
  1587  			// Verify that all of `gotMetrics` is in `wantMetrics`.
  1588  			for metric, got := range gotMetrics {
  1589  				if _, found := wantMetrics[metric]; !found {
  1590  					gotText, err := singleLineFormatter.Marshal(reflectProto(got))
  1591  					if err != nil {
  1592  						t.Fatalf("cannot marshal snapshot data: %v", err)
  1593  					}
  1594  					t.Errorf("metric %s found in snapshot data (%v) but not present in reference data", metric, string(gotText))
  1595  				}
  1596  			}
  1597  
  1598  			// The rest of the test assumes the keys are the same.
  1599  			if t.Failed() {
  1600  				return
  1601  			}
  1602  
  1603  			// Verify metric data matches.
  1604  			for metric := range wantMetrics {
  1605  				t.Run(metric, func(t *testing.T) {
  1606  					want := reflectProto(wantMetrics[metric])
  1607  					got := reflectProto(gotMetrics[metric])
  1608  					if diff := cmp.Diff(want, got, protocmp.Transform()); diff != "" {
  1609  						wantText, err := multiLineFormatter.Marshal(want)
  1610  						if err != nil {
  1611  							t.Fatalf("cannot marshal reference data: %v", err)
  1612  						}
  1613  						gotText, err := multiLineFormatter.Marshal(got)
  1614  						if err != nil {
  1615  							t.Fatalf("cannot marshal snapshot data: %v", err)
  1616  						}
  1617  						t.Errorf("Snapshot data did not produce the same data as the reference data.\n\nReference data:\n\n%v\n\nSnapshot data:\n\n%v\n\nDiff:\n\n%v\n\n", string(wantText), string(gotText), diff)
  1618  					}
  1619  				})
  1620  			}
  1621  		})
  1622  	}
  1623  }
  1624  
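        // TestWriteMultipleSnapshots verifies that two snapshots of the same metric taken at
        // different times are written as a single metric family containing both timestamped samples.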
  1625  func TestWriteMultipleSnapshots(t *testing.T) {
  1626  	testStart := time.Now()
  1627  	snapshot1 := newSnapshotAt(testStart).Add(fooInt.int(3))
  1628  	snapshot2 := newSnapshotAt(testStart.Add(3 * time.Minute)).Add(fooInt.int(5))
  1629  	var buf bytes.Buffer
  1630  	Write(&buf, ExportOptions{CommentHeader: "A header\non two lines"}, map[*Snapshot]SnapshotExportOptions{
  1631  		snapshot1: {ExporterPrefix: "export_"},
  1632  		snapshot2: {ExporterPrefix: "export_"},
  1633  	})
  1634  	gotData, err := (&expfmt.TextParser{}).TextToMetricFamilies(&buf)
  1635  	if err != nil {
  1636  		t.Fatalf("cannot parse data written from snapshots: %v", err)
  1637  	}
  1638  	if len(gotData) != 1 || gotData["export_"+fooInt.PB.GetPrometheusName()] == nil {
  1639  		t.Fatalf("unexpected data: %v", gotData)
  1640  	}
  1641  	got := reflectProto(gotData["export_"+fooInt.PB.GetPrometheusName()])
  1642  	var wantBuf bytes.Buffer
  1643  	io.WriteString(&wantBuf, fmt.Sprintf(`
  1644  		# HELP export_foo_int An integer about foo
  1645  		# TYPE export_foo_int gauge
  1646  		export_foo_int 3 %d
  1647  		export_foo_int 5 %d
  1648  	`, testStart.UnixMilli(), testStart.Add(3*time.Minute).UnixMilli()))
  1649  	wantData, err := (&expfmt.TextParser{}).TextToMetricFamilies(&wantBuf)
  1650  	if err != nil {
  1651  		t.Fatalf("cannot parse reference data: %v", err)
  1652  	}
  1653  	if len(wantData) != 1 || wantData["export_"+fooInt.PB.GetPrometheusName()] == nil {
  1654  		t.Fatalf("unexpected reference data: %v", wantData)
  1655  	}
  1656  	want := reflectProto(wantData["export_"+fooInt.PB.GetPrometheusName()])
  1657  	if diff := cmp.Diff(want, got, protocmp.Transform()); diff != "" {
  1658  		multiLineFormatter := &prototext.MarshalOptions{Multiline: true, Indent: "  ", EmitUnknown: true}
  1659  		wantText, err := multiLineFormatter.Marshal(want)
  1660  		if err != nil {
  1661  			t.Fatalf("cannot marshal reference data: %v", err)
  1662  		}
  1663  		gotText, err := multiLineFormatter.Marshal(got)
  1664  		if err != nil {
  1665  			t.Fatalf("cannot marshal snapshot data: %v", err)
  1666  		}
  1667  		t.Errorf("Snapshot data did not produce the same data as the reference data.\n\nReference data:\n\n%v\n\nSnapshot data:\n\n%v\n\nDiff:\n\n%v\n\n", string(wantText), string(gotText), diff)
  1668  	}
  1669  }
  1670  
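        // TestGroupSameNameMetrics verifies that all time series sharing the same exported metric
        // name are written contiguously, even when they come from different snapshots.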
  1671  func TestGroupSameNameMetrics(t *testing.T) {
  1672  	snapshot1 := NewSnapshot().Add(
  1673  		fooCounter.int(3),
  1674  		fooInt.int(3),
  1675  		fooDist.dist(0, 1),
  1676  	)
  1677  	snapshot2 := NewSnapshot().Add(
  1678  		fooDist.dist(1, 2),
  1679  		fooCounter.int(2),
  1680  	)
  1681  	snapshot3 := NewSnapshot().Add(
  1682  		fooDist.dist(1, 2),
  1683  		fooCounter.int(2),
  1684  	)
  1685  	var buf bytes.Buffer
  1686  	_, err := Write(&buf, ExportOptions{}, map[*Snapshot]SnapshotExportOptions{
  1687  		snapshot1: {ExporterPrefix: "my_little_prefix_", ExtraLabels: map[string]string{"snap": "1"}},
  1688  		snapshot2: {ExporterPrefix: "my_little_prefix_", ExtraLabels: map[string]string{"snap": "2"}},
  1689  		snapshot3: {ExporterPrefix: "not_the_same_prefix_", ExtraLabels: map[string]string{"snap": "1"}},
  1690  	})
  1691  	if err != nil {
  1692  		t.Fatalf("Cannot write snapshot data: %v", err)
  1693  	}
  1694  	rawData := buf.String() // Capture the data written.
  1695  
  1696  	// Make sure the data written does parse.
  1697  	// We don't use this result here because the Prometheus library is more permissive than this test.
  1698  	if _, err := (&expfmt.TextParser{}).TextToMetricFamilies(&buf); err != nil {
  1699  		t.Fatalf("cannot parse data written from snapshots: %v\nraw data:\n%s\n(end of raw data)", err, rawData)
  1700  	}
  1701  
  1702  	// Verify that we see all metrics, and that each time we see a new one, it's one we haven't seen
  1703  	// before.
  1704  	seenMetrics := map[string]bool{}
  1705  	var lastMetric string
  1706  	for lineNumber, line := range strings.Split(rawData, "\n") {
  1707  		t.Logf("Line %d: %q", lineNumber+1, line)
  1708  		if strings.TrimSpace(line) == "" || strings.HasPrefix(line, "#") {
  1709  			continue
  1710  		}
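        		// Strip the metric name (letters, digits and underscores) from the front of the line;
        		// whatever remains must start with '{' (labels) or ' ' (the sample value).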
  1711  		strippedMetricName := strings.TrimLeftFunc(line, func(r rune) bool {
  1712  			return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_'
  1713  		})
  1714  		if len(strippedMetricName) == 0 {
  1715  			t.Fatalf("invalid line: %q", line)
  1716  		}
  1717  		if strippedMetricName[0] != '{' && strippedMetricName[0] != ' ' {
  1718  			t.Fatalf("invalid line: %q", line)
  1719  		}
  1720  		metricName := line[:len(line)-len(strippedMetricName)]
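        		// Collapse distribution sub-series (_sum, _count, _bucket, ...) into their parent metric name.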
  1721  		for _, distribSuffix := range []string{"_sum", "_count", "_bucket", "_min", "_max", "_ssd"} {
  1722  			metricName = strings.TrimSuffix(metricName, distribSuffix)
  1723  		}
  1724  		if lastMetric != "" && lastMetric != metricName && seenMetrics[metricName] {
  1725  			t.Fatalf("line %q: got already-seen metric name %q yet it is not the last metric (%s)", line, metricName, lastMetric)
  1726  		}
  1727  		lastMetric = metricName
  1728  		seenMetrics[metricName] = true
  1729  	}
  1730  	wantSeenMetrics := map[string]bool{
  1731  		fmt.Sprintf("my_little_prefix_%s", fooCounter.PB.GetPrometheusName()):    true,
  1732  		fmt.Sprintf("my_little_prefix_%s", fooInt.PB.GetPrometheusName()):        true,
  1733  		fmt.Sprintf("my_little_prefix_%s", fooDist.PB.GetPrometheusName()):       true,
  1734  		fmt.Sprintf("not_the_same_prefix_%s", fooCounter.PB.GetPrometheusName()): true,
  1735  		fmt.Sprintf("not_the_same_prefix_%s", fooDist.PB.GetPrometheusName()):    true,
  1736  	}
  1737  	if !cmp.Equal(seenMetrics, wantSeenMetrics) {
  1738  		t.Errorf("Seen metrics: %v\nWant metrics: %v", seenMetrics, wantSeenMetrics)
  1739  	}
  1740  }
  1741  
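        // TestNumberPacker packs and unpacks a large set of boundary-case integers and floats,
        // checking that each value unpacks back to an equivalent number and that the
        // direct-vs-indirect storage decision matches needsPackerStorage.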
  1742  func TestNumberPacker(t *testing.T) {
  1743  	interestingIntegers := map[uint64]struct{}{
  1744  		uint64(0):                  struct{}{},
  1745  		uint64(0x5555555555555555): struct{}{},
  1746  		uint64(0xaaaaaaaaaaaaaaaa): struct{}{},
  1747  		uint64(0xffffffffffffffff): struct{}{},
  1748  	}
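        	// Grow the set by setting and clearing each individual bit of every value, in two
        	// passes so that two-bit mutations are covered as well.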
  1749  	for numBits := 0; numBits < 2; numBits++ {
  1750  		newIntegers := map[uint64]struct{}{}
  1751  		for interestingInt := range interestingIntegers {
  1752  			for i := 0; i < 64; i++ {
  1753  				newIntegers[interestingInt|(1<<i)] = struct{}{}
  1754  				newIntegers[interestingInt & ^(1<<i)] = struct{}{}
  1755  			}
  1756  		}
  1757  		for newInt := range newIntegers {
  1758  			interestingIntegers[newInt] = struct{}{}
  1759  		}
  1760  	}
  1761  	for _, i := range []int64{
  1762  		0,
  1763  		-1,
  1764  		math.MinInt,
  1765  		math.MaxInt,
  1766  		math.MinInt8,
  1767  		math.MaxInt8,
  1768  		math.MaxUint8,
  1769  		math.MinInt16,
  1770  		math.MaxInt16,
  1771  		math.MaxUint16,
  1772  		math.MinInt32,
  1773  		math.MaxInt32,
  1774  		math.MaxUint32,
  1775  		math.MinInt64,
  1776  		math.MaxInt64,
  1777  		int64(maxDirectUint),
  1778  	} {
  1779  		for d := int64(-3); d <= int64(3); d++ {
  1780  			interestingIntegers[uint64(i+d)] = struct{}{}
  1781  		}
  1782  	}
  1783  	interestingIntegers[0] = struct{}{}
  1784  	interestingIntegers[1] = struct{}{}
  1785  	interestingIntegers[2] = struct{}{}
  1786  	interestingIntegers[3] = struct{}{}
  1787  	interestingIntegers[math.MaxUint64-3] = struct{}{}
  1788  	interestingIntegers[math.MaxUint64-2] = struct{}{}
  1789  	interestingIntegers[math.MaxUint64-1] = struct{}{}
  1790  	interestingIntegers[math.MaxUint64] = struct{}{}
  1791  
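        	// Interesting floats: small multiples of powers of ten, special values, and the bit
        	// patterns of every interesting integer reinterpreted as float64.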
  1792  	interestingFloats := make(map[float64]struct{}, len(interestingIntegers)+21*21+17)
  1793  	for divExp := -10; divExp < 10; divExp++ {
  1794  		div := math.Pow(10, float64(divExp))
  1795  		for i := -10; i < 10; i++ {
  1796  			interestingFloats[float64(i)*div] = struct{}{}
  1797  		}
  1798  	}
  1799  	interestingFloats[0.0] = struct{}{}
  1800  	interestingFloats[math.NaN()] = struct{}{}
  1801  	interestingFloats[math.Inf(1)] = struct{}{}
  1802  	interestingFloats[math.Inf(-1)] = struct{}{}
  1803  	interestingFloats[math.Pi] = struct{}{}
  1804  	interestingFloats[math.Sqrt2] = struct{}{}
  1805  	interestingFloats[math.E] = struct{}{}
  1806  	interestingFloats[math.SqrtE] = struct{}{}
  1807  	interestingFloats[math.Ln2] = struct{}{}
  1808  	interestingFloats[math.MaxFloat32] = struct{}{}
  1809  	interestingFloats[-math.MaxFloat32] = struct{}{}
  1810  	interestingFloats[math.MaxFloat64] = struct{}{}
  1811  	interestingFloats[-math.MaxFloat64] = struct{}{}
  1812  	interestingFloats[math.SmallestNonzeroFloat32] = struct{}{}
  1813  	interestingFloats[-math.SmallestNonzeroFloat32] = struct{}{}
  1814  	interestingFloats[math.SmallestNonzeroFloat64] = struct{}{}
  1815  	interestingFloats[-math.SmallestNonzeroFloat64] = struct{}{}
  1816  	for interestingInt := range interestingIntegers {
  1817  		interestingFloats[math.Float64frombits(interestingInt)] = struct{}{}
  1818  	}
  1819  
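        	// The capacity is an upper bound: many of these numbers should pack directly and never
        	// reach the data slice (the packing_efficiency subtest below checks that).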
  1820  	p := &numberPacker{
  1821  		data: make([]uint64, 0, len(interestingIntegers)+len(interestingFloats)),
  1822  	}
  1823  
  1824  	t.Run("integers", func(t *testing.T) {
  1825  		seenDirectInteger := false
  1826  		seenIndirectInteger := false
  1827  		for interestingInt := range interestingIntegers {
  1828  			orig := NewInt(int64(interestingInt))
  1829  			packed := p.pack(orig)
  1830  			unpacked := p.unpack(packed)
  1831  			if !orig.SameType(unpacked) || orig.Int != unpacked.Int {
  1832  				t.Errorf("integer %v (bits=%x): got packed=%v => unpacked version %v (int: %d)", orig, interestingInt, uint32(packed), unpacked, unpacked.Int)
  1833  			}
  1834  			needsIndirection := needsPackerStorage(orig)
  1835  			switch uint32(packed) & storageField {
  1836  			case storageFieldDirect:
  1837  				seenDirectInteger = true
  1838  				if needsIndirection != 0 {
  1839  					t.Errorf("integer %v (bits=%x): got needsIndirection=%v want %v", orig, interestingInt, needsIndirection, 0)
  1840  				}
  1841  			case storageFieldIndirect:
  1842  				seenIndirectInteger = true
  1843  				if needsIndirection != 1 {
  1844  					t.Errorf("integer %v (bits=%x): got needsIndirection=%v want %v", orig, interestingInt, needsIndirection, 1)
  1845  				}
  1846  			}
  1847  		}
  1848  		if !seenDirectInteger {
  1849  			t.Error("did not encounter any integer that could be packed directly")
  1850  		}
  1851  		if !seenIndirectInteger {
  1852  			t.Error("did not encounter any integer that was packed indirectly")
  1853  		}
  1854  	})
  1855  	t.Run("packing_efficiency", func(t *testing.T) {
  1856  		// Verify that we actually saved space by not packing every number in numberPacker itself.
  1857  		if len(p.data) >= len(interestingIntegers) {
  1858  			t.Errorf("packer stored %d data points in its data, but we expected some of the %d packed integers to be stored directly rather than in the packer's data", len(p.data), len(interestingIntegers))
  1859  		}
  1860  	})
  1861  	t.Run("floats", func(t *testing.T) {
  1862  		seenDirectFloat := false
  1863  		seenIndirectFloat := false
  1864  		for interestingFloat := range interestingFloats {
  1865  			orig := NewFloat(interestingFloat)
  1866  			packed := p.pack(orig)
  1867  			unpacked := p.unpack(packed)
  1868  			switch {
  1869  			case interestingFloat == 0: // Zero-valued float becomes an integer.
  1870  				if !unpacked.IsInteger() {
  1871  					t.Errorf("Zero-valued float %v: got non-integer number: %v", orig, unpacked)
  1872  				} else if unpacked.Int != 0 {
  1873  					t.Errorf("Zero-valued float %v: got non-zero integer: %d", orig, unpacked.Int)
  1874  				}
  1875  			case math.IsNaN(orig.Float):
  1876  				if !math.IsNaN(unpacked.Float) {
  1877  					t.Errorf("NaN float %v: got non-NaN unpacked version %v", orig, unpacked)
  1878  				}
  1879  			default: // Not NaN, not integer
  1880  				if !orig.SameType(unpacked) || orig.Float != unpacked.Float {
  1881  					t.Errorf("float %v (64bits=%x, 32bits=%x, float32-encodable=%v): got packed=%x => unpacked version %v (float: %f)", orig, math.Float64bits(interestingFloat), math.Float32bits(float32(interestingFloat)), float64(float32(interestingFloat)) == interestingFloat, uint32(packed), unpacked, unpacked.Float)
  1882  				}
  1883  			}
  1884  			needsIndirection := needsPackerStorage(orig)
  1885  			switch uint32(packed) & storageField {
  1886  			case storageFieldDirect:
  1887  				seenDirectFloat = true
  1888  				if needsIndirection != 0 {
  1889  					t.Errorf("float %v (64bits=%x): got needsIndirection=%v want %v", orig, math.Float64bits(interestingFloat), needsIndirection, 0)
  1890  				}
  1891  			case storageFieldIndirect:
  1892  				seenIndirectFloat = true
  1893  				if needsIndirection != 1 {
  1894  					t.Errorf("float %v (64bits=%x): got needsIndirection=%v want %v", orig, math.Float64bits(interestingFloat), needsIndirection, 1)
  1895  				}
  1896  			}
  1897  		}
  1898  		if !seenDirectFloat {
  1899  			t.Error("did not encounter any float that could be packed directly")
  1900  		}
  1901  		if !seenIndirectFloat {
  1902  			t.Error("did not encounter any float that was packed indirectly")
  1903  		}
  1904  	})
  1905  }
  1906  
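        // TestNumberPackerCapacity verifies that a packer sized for two indirectly-stored numbers
        // accepts exactly two of them and panics on the third, while directly-packable numbers
        // never consume that capacity.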
  1907  func TestNumberPackerCapacity(t *testing.T) {
  1908  	packer := &numberPacker{
  1909  		data: make([]uint64, 0, 2),
  1910  	}
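        	// checkPanic runs fn and fails the test if its panicking behavior does not match want.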
  1911  	checkPanic := func(want bool, fn func()) {
  1912  		t.Helper()
  1913  		defer func() {
  1914  			panicErr := recover()
  1915  			t.Helper()
  1916  			if want && panicErr == nil {
  1917  				t.Error("function did not panic but wanted it to")
  1918  			} else if !want && panicErr != nil {
  1919  				t.Errorf("function unexpectedly panicked: %v", panicErr)
  1920  			}
  1921  		}()
  1922  		fn()
  1923  	}
  1924  	t.Run("number that does not need indirection", func(t *testing.T) {
  1925  		checkPanic(false, func() {
  1926  			packer.pack(&Number{Int: 1})
  1927  		})
  1928  	})
  1929  	t.Run("first number that needs indirection fits", func(t *testing.T) {
  1930  		checkPanic(false, func() {
  1931  			packer.pack(&Number{Int: int64(maxDirectUint + 3)})
  1932  		})
  1933  	})
  1934  	t.Run("second number that needs indirection also fits", func(t *testing.T) {
  1935  		checkPanic(false, func() {
  1936  			packer.pack(&Number{Int: int64(maxDirectUint + 2)})
  1937  		})
  1938  	})
  1939  	t.Run("third number that needs indirection does not", func(t *testing.T) {
  1940  		checkPanic(true, func() {
  1941  			packer.pack(&Number{Int: int64(maxDirectUint + 1)})
  1942  		})
  1943  	})
  1944  	t.Run("second number that does not need indirection still fits", func(t *testing.T) {
  1945  		checkPanic(false, func() {
  1946  			packer.pack(&Number{Int: int64(maxDirectUint)})
  1947  		})
  1948  	})
  1949  }