github.com/rudderlabs/rudder-go-kit@v0.30.0/stats/internal/otel/otel_test.go

package otel

import (
	"context"
	"fmt"
	"math"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"strconv"
	"testing"
	"time"

	"github.com/ory/dockertest/v3"
	"github.com/ory/dockertest/v3/docker"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	promClient "github.com/prometheus/client_model/go"
	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"

	"github.com/rudderlabs/rudder-go-kit/httputil"
	statsTest "github.com/rudderlabs/rudder-go-kit/stats/testhelper"
	"github.com/rudderlabs/rudder-go-kit/testhelper"
	dt "github.com/rudderlabs/rudder-go-kit/testhelper/docker"
)

const (
	metricsPort = "8889"
)

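// globalDefaultAttrs are the labels expected on every exported metric: service_version and the
// telemetry_sdk_* labels come from the OTel SDK resource, instanceName from the attribute
// passed to NewResource in each test.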
var (
	globalDefaultAttrs = []*promClient.LabelPair{
		{Name: ptr("service_version"), Value: ptr("v1.2.3")},
		{Name: ptr("telemetry_sdk_language"), Value: ptr("go")},
		{Name: ptr("telemetry_sdk_name"), Value: ptr("opentelemetry")},
		{Name: ptr("telemetry_sdk_version"), Value: ptr(otel.Version())},
		{Name: ptr("instanceName"), Value: ptr("my-instance-id")},
	}
	globalGRPCDefaultAttrs = append(globalDefaultAttrs,
		// label1=value1 comes from otel-collector-config.yaml (see const_labels)
		&promClient.LabelPair{Name: ptr("label1"), Value: ptr("value1")},
	)
)

// see https://opentelemetry.io/docs/collector/getting-started/
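//
// TestMetrics exercises both exporters: OTLP over gRPC to an OpenTelemetry Collector (scraped
// via its Prometheus exporter) and the direct Prometheus exporter. It then verifies counters,
// histograms and the configured bucket boundaries at the resulting metrics endpoint.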
func TestMetrics(t *testing.T) {
	var (
		ctx       = context.Background()
		meterName = "some-meter-name"
		svcName   = "TestMetrics"
	)
	scenarios := []testCase{
		{
			name:             "grpc",
			additionalLabels: globalGRPCDefaultAttrs,
			setupMeterProvider: func(t testing.TB, _ ...MeterProviderOption) (*sdkmetric.MeterProvider, string) {
				cwd, err := os.Getwd()
				require.NoError(t, err)
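				// Start an OTel Collector container configured via testdata/otel-collector-config.yaml;
				// it receives OTLP over gRPC and exposes the collected metrics for scraping on metricsPort.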
				container, grpcEndpoint := statsTest.StartOTelCollector(t, metricsPort,
					filepath.Join(cwd, "testdata", "otel-collector-config.yaml"),
				)

				res, err := NewResource(svcName, "v1.2.3",
					attribute.String("instanceName", "my-instance-id"),
				)
				require.NoError(t, err)
				var om Manager
				tp, mp, err := om.Setup(ctx, res,
					WithInsecure(),
					WithTracerProvider(grpcEndpoint, WithTracingSamplingRate(1.0)),
					WithMeterProvider(
						WithGRPCMeterProvider(grpcEndpoint),
						WithMeterProviderExportsInterval(100*time.Millisecond),
						WithDefaultHistogramBucketBoundaries([]float64{1, 2, 3}),
						WithHistogramBucketBoundaries("baz", meterName, []float64{10, 20, 30}),
					),
				)
				require.NoError(t, err)
				t.Cleanup(func() { require.NoError(t, om.Shutdown(context.Background())) })
				require.NotEqual(t, tp, otel.GetTracerProvider())
				require.NotEqual(t, mp, otel.GetMeterProvider())

				metricsEndpoint := fmt.Sprintf("http://localhost:%d/metrics", dt.GetHostPort(t, metricsPort, container))
				return mp, metricsEndpoint
			},
		},
		{
			name:             "prometheus",
			additionalLabels: globalDefaultAttrs,
			setupMeterProvider: func(t testing.TB, _ ...MeterProviderOption) (*sdkmetric.MeterProvider, string) {
				registry := prometheus.NewRegistry()

				res, err := NewResource(svcName, "v1.2.3",
					attribute.String("instanceName", "my-instance-id"),
				)
				require.NoError(t, err)
				var om Manager
				tp, mp, err := om.Setup(ctx, res,
					WithInsecure(),
					WithMeterProvider(
						WithPrometheusExporter(registry),
						WithMeterProviderExportsInterval(100*time.Millisecond),
						WithDefaultHistogramBucketBoundaries([]float64{1, 2, 3}),
						WithHistogramBucketBoundaries("baz", meterName, []float64{10, 20, 30}),
					),
				)
				require.NoError(t, err)
				t.Cleanup(func() { require.NoError(t, om.Shutdown(context.Background())) })
				require.Nil(t, tp)
				require.NotEqual(t, mp, otel.GetMeterProvider())

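				// No collector here: the Prometheus exporter writes straight into the registry,
				// which is served over HTTP by a local httptest server.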
				ts := httptest.NewServer(promhttp.InstrumentMetricHandler(
					registry, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}),
				))
				t.Cleanup(ts.Close)

				return mp, ts.URL
			},
		},
	}

	for _, scenario := range scenarios {
		t.Run(scenario.name, func(t *testing.T) {
			mp, metricsEndpoint := scenario.setupMeterProvider(t)
			m := mp.Meter(meterName)
			// foo counter
			counter, err := m.Int64Counter("foo")
			require.NoError(t, err)
			counter.Add(ctx, 1, metric.WithAttributes(attribute.String("hello", "world")))
			// bar counter
			counter, err = m.Int64Counter("bar")
			require.NoError(t, err)
			counter.Add(ctx, 5)
			// baz histogram
			h1, err := m.Int64Histogram("baz")
			require.NoError(t, err)
			h1.Record(ctx, 20, metric.WithAttributes(attribute.String("a", "b")))
			// qux histogram
			h2, err := m.Int64Histogram("qux")
			require.NoError(t, err)
			h2.Record(ctx, 2, metric.WithAttributes(attribute.String("c", "d")))

			// Run assertions
			metrics := requireMetrics(t, metricsEndpoint, "foo", "bar", "baz", "qux")

			require.EqualValues(t, ptr("foo"), metrics["foo"].Name)
			require.EqualValues(t, ptr(promClient.MetricType_COUNTER), metrics["foo"].Type)
			require.Len(t, metrics["foo"].Metric, 1)
			require.EqualValues(t, &promClient.Counter{Value: ptr(1.0)}, metrics["foo"].Metric[0].Counter)
			require.ElementsMatch(t, append(
				scenario.additionalLabels,
				&promClient.LabelPair{Name: ptr("hello"), Value: ptr("world")},
				&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
				&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
			), metrics["foo"].Metric[0].Label)

			require.EqualValues(t, ptr("bar"), metrics["bar"].Name)
			require.EqualValues(t, ptr(promClient.MetricType_COUNTER), metrics["bar"].Type)
			require.Len(t, metrics["bar"].Metric, 1)
			require.EqualValues(t, &promClient.Counter{Value: ptr(5.0)}, metrics["bar"].Metric[0].Counter)
			require.ElementsMatch(t, append(
				scenario.additionalLabels,
				&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
				&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
			), metrics["bar"].Metric[0].Label)

			requireHistogramEqual(t, metrics["baz"], histogram{
				name: "baz", count: 1, sum: 20,
				buckets: []*promClient.Bucket{
					{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(10.0)},
					{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(20.0)},
					{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(30.0)},
					{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
				},
				labels: append(
					scenario.additionalLabels,
					&promClient.LabelPair{Name: ptr("a"), Value: ptr("b")},
					&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
					&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
				),
			})

			requireHistogramEqual(t, metrics["qux"], histogram{
				name: "qux", count: 1, sum: 2,
				buckets: []*promClient.Bucket{
					{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(1.0)},
					{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(2.0)},
					{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(3.0)},
					{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
				},
				labels: append(
					scenario.additionalLabels,
					&promClient.LabelPair{Name: ptr("c"), Value: ptr("d")},
					&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
					&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
				),
			})
		})
	}
}

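// TestHistogramBuckets verifies that WithDefaultHistogramBucketBoundaries applies to every
// instrument on every meter, while WithHistogramBucketBoundaries overrides the boundaries
// only for the named instrument/meter pair.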
func TestHistogramBuckets(t *testing.T) {
	var (
		ctx     = context.Background()
		svcName = "TestHistogramBuckets"
	)
	scenarios := []testCase{
		{
			name:             "grpc",
			additionalLabels: globalGRPCDefaultAttrs,
			setupMeterProvider: func(t testing.TB, opts ...MeterProviderOption) (*sdkmetric.MeterProvider, string) {
				cwd, err := os.Getwd()
				require.NoError(t, err)
				container, grpcEndpoint := statsTest.StartOTelCollector(t, metricsPort,
					filepath.Join(cwd, "testdata", "otel-collector-config.yaml"),
				)

				res, err := NewResource(svcName, "v1.2.3", attribute.String("instanceName", "my-instance-id"))
				require.NoError(t, err)
				var om Manager
				_, mp, err := om.Setup(ctx, res,
					WithInsecure(),
					WithMeterProvider(append(opts,
						WithGRPCMeterProvider(grpcEndpoint),
						WithMeterProviderExportsInterval(50*time.Millisecond),
					)...),
				)
				require.NoError(t, err)
				t.Cleanup(func() { require.NoError(t, om.Shutdown(context.Background())) })
				require.NotEqual(t, mp, otel.GetMeterProvider())

				metricsEndpoint := fmt.Sprintf("http://localhost:%d/metrics", dt.GetHostPort(t, metricsPort, container))
				return mp, metricsEndpoint
			},
		},
		{
			name:             "prometheus",
			additionalLabels: globalDefaultAttrs,
			setupMeterProvider: func(t testing.TB, opts ...MeterProviderOption) (*sdkmetric.MeterProvider, string) {
				registry := prometheus.NewRegistry()

				res, err := NewResource(svcName, "v1.2.3", attribute.String("instanceName", "my-instance-id"))
				require.NoError(t, err)
				var om Manager
				tp, mp, err := om.Setup(ctx, res,
					WithInsecure(),
					WithMeterProvider(append(opts,
						WithPrometheusExporter(registry),
						WithMeterProviderExportsInterval(50*time.Millisecond),
					)...),
				)
				require.NoError(t, err)
				t.Cleanup(func() { require.NoError(t, om.Shutdown(context.Background())) })
				require.Nil(t, tp)
				require.NotEqual(t, mp, otel.GetMeterProvider())

				ts := httptest.NewServer(promhttp.InstrumentMetricHandler(
					registry, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}),
				))
				t.Cleanup(ts.Close)

				return mp, ts.URL
			},
		},
	}

	for _, scenario := range scenarios {
		t.Run(scenario.name, func(t *testing.T) {
			t.Run("default applies to all meters", func(t *testing.T) {
				mp, metricsEndpoint := scenario.setupMeterProvider(t,
					WithDefaultHistogramBucketBoundaries([]float64{10, 20, 30}),
				)

				// foo histogram on meter-1
				h, err := mp.Meter("meter-1").Int64Histogram("foo")
				require.NoError(t, err)
				h.Record(ctx, 20, metric.WithAttributes(attribute.String("a", "b")))

				// bar histogram on meter-2
				h, err = mp.Meter("meter-2").Int64Histogram("bar")
				require.NoError(t, err)
				h.Record(ctx, 30, metric.WithAttributes(attribute.String("c", "d")))

				metrics := requireMetrics(t, metricsEndpoint, "foo", "bar")

				requireHistogramEqual(t, metrics["foo"], histogram{
					name: "foo", count: 1, sum: 20,
					buckets: []*promClient.Bucket{
						{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(10.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(20.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(30.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
					},
					labels: append(
						scenario.additionalLabels,
						&promClient.LabelPair{Name: ptr("a"), Value: ptr("b")},
						&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
						&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
					),
				})

				requireHistogramEqual(t, metrics["bar"], histogram{
					name: "bar", count: 1, sum: 30,
					buckets: []*promClient.Bucket{
						{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(10.0)},
						{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(20.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(30.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
					},
					labels: append(
						scenario.additionalLabels,
						&promClient.LabelPair{Name: ptr("c"), Value: ptr("d")},
						&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
						&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
					),
				})
			})

			t.Run("custom boundaries do not override default ones", func(t *testing.T) {
				mp, metricsEndpoint := scenario.setupMeterProvider(t,
					WithDefaultHistogramBucketBoundaries([]float64{10, 20, 30}),
					WithHistogramBucketBoundaries("bar", "meter-1", []float64{40, 50, 60}),
					WithHistogramBucketBoundaries("baz", "meter-1", []float64{70, 80, 90}),
				)

				// foo histogram
				h, err := mp.Meter("meter-1").Int64Histogram("foo")
				require.NoError(t, err)
				h.Record(ctx, 20, metric.WithAttributes(attribute.String("a", "b")))

				// bar histogram
				h, err = mp.Meter("meter-1").Int64Histogram("bar")
				require.NoError(t, err)
				h.Record(ctx, 50, metric.WithAttributes(attribute.String("c", "d")))

				// baz histogram
				h, err = mp.Meter("meter-1").Int64Histogram("baz")
				require.NoError(t, err)
				h.Record(ctx, 80, metric.WithAttributes(attribute.String("e", "f")))

				metrics := requireMetrics(t, metricsEndpoint, "foo", "bar", "baz")

				requireHistogramEqual(t, metrics["foo"], histogram{
					name: "foo", count: 1, sum: 20,
					buckets: []*promClient.Bucket{
						{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(10.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(20.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(30.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
					},
					labels: append(
						scenario.additionalLabels,
						&promClient.LabelPair{Name: ptr("a"), Value: ptr("b")},
						&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
						&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
					),
				})

				requireHistogramEqual(t, metrics["bar"], histogram{
					name: "bar", count: 1, sum: 50,
					buckets: []*promClient.Bucket{
						{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(40.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(50.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(60.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
					},
					labels: append(
						scenario.additionalLabels,
						&promClient.LabelPair{Name: ptr("c"), Value: ptr("d")},
						&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
						&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
					),
				})

				requireHistogramEqual(t, metrics["baz"], histogram{
					name: "baz", count: 1, sum: 80,
					buckets: []*promClient.Bucket{
						{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(70.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(80.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(90.0)},
						{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
					},
					labels: append(
						scenario.additionalLabels,
						&promClient.LabelPair{Name: ptr("e"), Value: ptr("f")},
						&promClient.LabelPair{Name: ptr("job"), Value: &svcName},
						&promClient.LabelPair{Name: ptr("service_name"), Value: &svcName},
					),
				})
			})
		})
	}
}

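// TestCollectorGlobals verifies that WithGlobalTracerProvider and WithGlobalMeterProvider
// register the providers returned by Setup as the otel package globals.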
func TestCollectorGlobals(t *testing.T) {
	grpcPort, err := testhelper.GetFreePort()
	require.NoError(t, err)

	pool, err := dockertest.NewPool("")
	require.NoError(t, err)

	collector, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "otel/opentelemetry-collector",
		Tag:        "0.67.0",
		PortBindings: map[docker.Port][]docker.PortBinding{
			"4317/tcp": {{HostPort: strconv.Itoa(grpcPort)}},
		},
	})
	require.NoError(t, err)
	t.Cleanup(func() {
		if err := pool.Purge(collector); err != nil {
			t.Logf("Could not purge resource: %v", err)
		}
	})

	var (
		om       Manager
		ctx      = context.Background()
		endpoint = fmt.Sprintf("localhost:%d", grpcPort)
	)
	res, err := NewResource(t.Name(), "v1.2.3", attribute.String("instanceName", "my-instance-id"))
	require.NoError(t, err)
	tp, mp, err := om.Setup(ctx, res,
		WithInsecure(),
		WithTracerProvider(endpoint, WithTracingSamplingRate(1.0), WithGlobalTracerProvider()),
		WithMeterProvider(WithGRPCMeterProvider(endpoint), WithGlobalMeterProvider()),
	)
	require.NoError(t, err)
	t.Cleanup(func() { require.NoError(t, om.Shutdown(context.Background())) })
	require.Equal(t, tp, otel.GetTracerProvider())
	require.Equal(t, mp, otel.GetMeterProvider())
}

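// TestNonBlockingConnection verifies that the OTLP gRPC exporter does not block when the
// collector is unreachable: data points recorded before the collector starts are retried
// (see RetryConfig) and eventually exported once it becomes available.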
func TestNonBlockingConnection(t *testing.T) {
	grpcPort, err := testhelper.GetFreePort()
	require.NoError(t, err)

	res, err := NewResource(t.Name(), "v1.2.3",
		attribute.String("instanceName", "my-instance-id"),
	)
	require.NoError(t, err)

	var (
		om       Manager
		ctx      = context.Background()
		endpoint = fmt.Sprintf("localhost:%d", grpcPort)
	)
	_, mp, err := om.Setup(ctx, res,
		WithInsecure(),
		WithMeterProvider(
			WithGRPCMeterProvider(endpoint),
			WithMeterProviderExportsInterval(100*time.Millisecond),
		),
		WithRetryConfig(RetryConfig{
			Enabled:         true,
			InitialInterval: time.Second,
			MaxInterval:     time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, om.Shutdown(context.Background()))
	}()

	meter := mp.Meter("test")
	fooCounter, err := meter.Int64Counter("foo")
	require.NoError(t, err)
	barCounter, err := meter.Float64Counter("bar")
	require.NoError(t, err)

	// This counter should not be lost even though the collector container has not been started yet;
	// see MaxElapsedTime in the RetryConfig above.
	fooCounter.Add(ctx, 123, metric.WithAttributes(attribute.String("hello", "world")))

	cwd, err := os.Getwd()
	require.NoError(t, err)

	container, _ := statsTest.StartOTelCollector(t, metricsPort,
		filepath.Join(cwd, "testdata", "otel-collector-config.yaml"),
		statsTest.WithStartCollectorPort(grpcPort),
	)
	barCounter.Add(ctx, 456) // this should be recorded

	metricsEndpoint := fmt.Sprintf("http://localhost:%d/metrics", dt.GetHostPort(t, metricsPort, container))
	metrics := requireMetrics(t, metricsEndpoint, "foo", "bar")

	defaultAttrs := append(globalGRPCDefaultAttrs,
		&promClient.LabelPair{Name: ptr("job"), Value: ptr("TestNonBlockingConnection")},
		&promClient.LabelPair{Name: ptr("service_name"), Value: ptr("TestNonBlockingConnection")},
	)

	require.EqualValues(t, ptr("foo"), metrics["foo"].Name)
	require.EqualValues(t, ptr(promClient.MetricType_COUNTER), metrics["foo"].Type)
	require.Len(t, metrics["foo"].Metric, 1)
	require.EqualValues(t, &promClient.Counter{Value: ptr(123.0)}, metrics["foo"].Metric[0].Counter)
	require.ElementsMatch(t, append(defaultAttrs,
		&promClient.LabelPair{Name: ptr("hello"), Value: ptr("world")},
	), metrics["foo"].Metric[0].Label)

	require.EqualValues(t, ptr("bar"), metrics["bar"].Name)
	require.EqualValues(t, ptr(promClient.MetricType_COUNTER), metrics["bar"].Type)
	require.Len(t, metrics["bar"].Metric, 1)
	require.EqualValues(t, &promClient.Counter{Value: ptr(456.0)}, metrics["bar"].Metric[0].Counter)
	require.ElementsMatch(t, defaultAttrs, metrics["bar"].Metric[0].Label)
}

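// requireMetrics polls the given Prometheus endpoint until every required metric family is
// present (or the timeout expires) and returns the parsed families keyed by name.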
func requireMetrics(
	t *testing.T, metricsEndpoint string, requiredKeys ...string,
) map[string]*promClient.MetricFamily {
	t.Helper()

	var (
		err     error
		resp    *http.Response
		metrics map[string]*promClient.MetricFamily
	)
	require.Eventuallyf(t, func() bool {
		resp, err = http.Get(metricsEndpoint)
		if err != nil {
			return false
		}
		defer func() { httputil.CloseResponse(resp) }()
		metrics, err = statsTest.ParsePrometheusMetrics(resp.Body)
		if err != nil {
			return false
		}
		for _, k := range requiredKeys {
			if _, ok := metrics[k]; !ok {
				return false
			}
		}
		return true
	}, 5*time.Second, 100*time.Millisecond, "err: %v, metrics: %+v", err, metrics)

	return metrics
}

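// requireHistogramEqual asserts that the metric family is a histogram with the expected
// name, sample count, sum, bucket boundaries and labels.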
func requireHistogramEqual(t *testing.T, mf *promClient.MetricFamily, h histogram) {
	t.Helper()
	require.EqualValues(t, &h.name, mf.Name)
	require.EqualValues(t, ptr(promClient.MetricType_HISTOGRAM), mf.Type)
	require.Len(t, mf.Metric, 1)
	require.EqualValuesf(t, &h.count, mf.Metric[0].Histogram.SampleCount,
		"Got %d, expected %d", *mf.Metric[0].Histogram.SampleCount, h.count,
	)
	require.EqualValuesf(t, &h.sum, mf.Metric[0].Histogram.SampleSum,
		"Got %.2f, expected %.2f", *mf.Metric[0].Histogram.SampleSum, h.sum,
	)
	require.ElementsMatchf(t, h.buckets, mf.Metric[0].Histogram.Bucket, "Buckets for %q do not match", h.name)
	require.ElementsMatch(t, h.labels, mf.Metric[0].Label)
}

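// ptr returns a pointer to v; handy for building the pointer-heavy client_model structs.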
func ptr[T any](v T) *T {
	return &v
}

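// testCase describes one exporter scenario: a name, the extra labels it is expected to add,
// and a function that builds the MeterProvider and returns the metrics endpoint to scrape.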
type testCase struct {
	name               string
	additionalLabels   []*promClient.LabelPair
	setupMeterProvider func(testing.TB, ...MeterProviderOption) (*sdkmetric.MeterProvider, string)
}

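// histogram holds the expected shape of a scraped Prometheus histogram.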
type histogram struct {
	name    string
	count   uint64
	sum     float64
	buckets []*promClient.Bucket
	labels  []*promClient.LabelPair
}