// github.com/rudderlabs/rudder-go-kit@v0.30.0/stats/otel_test.go
package stats

import (
	"context"
	"fmt"
	"math"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/ory/dockertest/v3"
	"github.com/prometheus/client_golang/prometheus"
	promClient "github.com/prometheus/client_model/go"
	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	otelMetric "go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"

	"github.com/rudderlabs/rudder-go-kit/config"
	"github.com/rudderlabs/rudder-go-kit/httputil"
	"github.com/rudderlabs/rudder-go-kit/logger"
	"github.com/rudderlabs/rudder-go-kit/logger/mock_logger"
	"github.com/rudderlabs/rudder-go-kit/stats/metric"
	statsTest "github.com/rudderlabs/rudder-go-kit/stats/testhelper"
	"github.com/rudderlabs/rudder-go-kit/testhelper"
	"github.com/rudderlabs/rudder-go-kit/testhelper/assert"
	"github.com/rudderlabs/rudder-go-kit/testhelper/docker"
	"github.com/rudderlabs/rudder-go-kit/testhelper/docker/resource/zipkin"
)

const (
	metricsPort = "8889"
)

var globalDefaultAttrs = []*promClient.LabelPair{
	{Name: ptr("instanceName"), Value: ptr("my-instance-id")},
	{Name: ptr("service_version"), Value: ptr("v1.2.3")},
	{Name: ptr("telemetry_sdk_language"), Value: ptr("go")},
	{Name: ptr("telemetry_sdk_name"), Value: ptr("opentelemetry")},
	{Name: ptr("telemetry_sdk_version"), Value: ptr(otel.Version())},
}

func TestOTelMeasurementInvalidOperations(t *testing.T) {
	s := &otelStats{meter: otel.GetMeterProvider().Meter(t.Name())}

	t.Run("counter invalid operations", func(t *testing.T) {
		require.Panics(t, func() {
			s.NewStat("test", CountType).Gauge(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", CountType).Observe(1.2)
		})
		require.Panics(t, func() {
			s.NewStat("test", CountType).RecordDuration()
		})
		require.Panics(t, func() {
			s.NewStat("test", CountType).SendTiming(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", CountType).Since(time.Now())
		})
	})

	t.Run("gauge invalid operations", func(t *testing.T) {
		require.Panics(t, func() {
			s.NewStat("test", GaugeType).Increment()
		})
		require.Panics(t, func() {
			s.NewStat("test", GaugeType).Count(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", GaugeType).Observe(1.2)
		})
		require.Panics(t, func() {
			s.NewStat("test", GaugeType).RecordDuration()
		})
		require.Panics(t, func() {
			s.NewStat("test", GaugeType).SendTiming(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", GaugeType).Since(time.Now())
		})
	})

	t.Run("histogram invalid operations", func(t *testing.T) {
		require.Panics(t, func() {
			s.NewStat("test", HistogramType).Increment()
		})
		require.Panics(t, func() {
			s.NewStat("test", HistogramType).Count(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", HistogramType).Gauge(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", HistogramType).RecordDuration()
		})
		require.Panics(t, func() {
			s.NewStat("test", HistogramType).SendTiming(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", HistogramType).Since(time.Now())
		})
	})

	t.Run("timer invalid operations", func(t *testing.T) {
		require.Panics(t, func() {
			s.NewStat("test", TimerType).Increment()
		})
		require.Panics(t, func() {
			s.NewStat("test", TimerType).Count(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", TimerType).Gauge(1)
		})
		require.Panics(t, func() {
			s.NewStat("test", TimerType).Observe(1.2)
		})
	})
}

func TestOTelMeasurementOperations(t *testing.T) {
	ctx := context.Background()

	t.Run("counter increment", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		s.NewStat("test-counter", CountType).Increment()
		md := getDataPoint[metricdata.Sum[int64]](ctx, t, r, "test-counter", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 1, md.DataPoints[0].Value)
	})

	t.Run("counter count", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		s.NewStat("test-counter", CountType).Count(10)
		md := getDataPoint[metricdata.Sum[int64]](ctx, t, r, "test-counter", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 10, md.DataPoints[0].Value)
	})

	t.Run("gauge", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		s.NewStat("test-gauge", GaugeType).Gauge(1234)
		md := getDataPoint[metricdata.Gauge[float64]](ctx, t, r, "test-gauge", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 1234, md.DataPoints[0].Value)
	})

	t.Run("tagged gauges", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		s.NewTaggedStat("test-tagged-gauge", GaugeType, Tags{"a": "b"}).Gauge(111)
		s.NewTaggedStat("test-tagged-gauge", GaugeType, Tags{"c": "d"}).Gauge(222)
		md := getDataPoint[metricdata.Gauge[float64]](ctx, t, r, "test-tagged-gauge", 0)
		require.Len(t, md.DataPoints, 2)
		// sorting data points by value since the collected time is the same
		sortDataPointsByValue(md.DataPoints)
		require.EqualValues(t, 111, md.DataPoints[0].Value)
		expectedAttrs1 := attribute.NewSet(attribute.String("a", "b"))
		require.True(t, expectedAttrs1.Equals(&md.DataPoints[0].Attributes))
		require.EqualValues(t, 222, md.DataPoints[1].Value)
		expectedAttrs2 := attribute.NewSet(attribute.String("c", "d"))
		require.True(t, expectedAttrs2.Equals(&md.DataPoints[1].Attributes))
	})

	t.Run("timer send timing", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		s.NewStat("test-timer-1", TimerType).SendTiming(10 * time.Second)
		md := getDataPoint[metricdata.Histogram[float64]](ctx, t, r, "test-timer-1", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 1, md.DataPoints[0].Count)
		require.InDelta(t, 10.0, md.DataPoints[0].Sum, 0.001)
	})

	t.Run("timer since", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		s.NewStat("test-timer-2", TimerType).Since(time.Now().Add(-time.Second))
		md := getDataPoint[metricdata.Histogram[float64]](ctx, t, r, "test-timer-2", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 1, md.DataPoints[0].Count)
		require.InDelta(t, 1.0, md.DataPoints[0].Sum, 0.001)
	})

	t.Run("timer RecordDuration", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		ot := s.NewStat("test-timer-3", TimerType)
		ot.(*otelTimer).now = func() time.Time {
			return time.Now().Add(-time.Second)
		}
		ot.RecordDuration()()
		md := getDataPoint[metricdata.Histogram[float64]](ctx, t, r, "test-timer-3", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 1, md.DataPoints[0].Count)
		require.InDelta(t, 1.0, md.DataPoints[0].Sum, 0.001)
	})

	t.Run("histogram", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		s.NewStat("test-hist-1", HistogramType).Observe(1.2)
		md := getDataPoint[metricdata.Histogram[float64]](ctx, t, r, "test-hist-1", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 1, md.DataPoints[0].Count)
		require.EqualValues(t, 1.2, md.DataPoints[0].Sum)
	})

	t.Run("tagged stats", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
		s.NewTaggedStat("test-tagged", CountType, Tags{"key": "value"}).Increment()
		md1 := getDataPoint[metricdata.Sum[int64]](ctx, t, r, "test-tagged", 0)
		require.Len(t, md1.DataPoints, 1)
		require.EqualValues(t, 1, md1.DataPoints[0].Value)
		expectedAttrs := attribute.NewSet(attribute.String("key", "value"))
		require.True(t, expectedAttrs.Equals(&md1.DataPoints[0].Attributes))

		// same measurement name, different measurement type
		s.NewTaggedStat("test-tagged", GaugeType, Tags{"key": "value"}).Gauge(1234)
		md2 := getDataPoint[metricdata.Gauge[float64]](ctx, t, r, "test-tagged", 1)
		require.Len(t, md2.DataPoints, 1)
		require.EqualValues(t, 1234, md2.DataPoints[0].Value)
		require.True(t, expectedAttrs.Equals(&md2.DataPoints[0].Attributes))
	})

	t.Run("measurement with empty name", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, logger: logger.NOP, config: statsConfig{enabled: atomicBool(true)}}
		s.NewStat("", CountType).Increment()
		md := getDataPoint[metricdata.Sum[int64]](ctx, t, r, "novalue", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 1, md.DataPoints[0].Value)
		require.True(t, md.DataPoints[0].Attributes.Equals(newAttributesSet(t)))
	})

	t.Run("measurement with empty name and empty tag key", func(t *testing.T) {
		r, m := newReaderWithMeter(t)
		s := &otelStats{meter: m, logger: logger.NOP, config: statsConfig{enabled: atomicBool(true)}}
		s.NewTaggedStat(" ", GaugeType, Tags{"key": "value", "": "value2", " ": "value3"}).Gauge(22)
		md := getDataPoint[metricdata.Gauge[float64]](ctx, t, r, "novalue", 0)
		require.Len(t, md.DataPoints, 1)
		require.EqualValues(t, 22, md.DataPoints[0].Value)
		require.True(t, md.DataPoints[0].Attributes.Equals(newAttributesSet(t,
			attribute.String("key", "value"),
		)))
	})
}

func TestOTelTaggedGauges(t *testing.T) {
	ctx := context.Background()
	r, m := newReaderWithMeter(t)
	s := &otelStats{meter: m, config: statsConfig{enabled: atomicBool(true)}}
	s.NewTaggedStat("test-gauge", GaugeType, Tags{"a": "b"}).Gauge(1)
	s.NewStat("test-gauge", GaugeType).Gauge(2)
	s.NewTaggedStat("test-gauge", GaugeType, Tags{"c": "d"}).Gauge(3)

	rm := metricdata.ResourceMetrics{}
	err := r.Collect(ctx, &rm)
	require.NoError(t, err)

	var dp []metricdata.DataPoint[float64]
	for _, sm := range rm.ScopeMetrics {
		for _, m := range sm.Metrics {
			dp = append(dp, m.Data.(metricdata.Gauge[float64]).DataPoints...)
		}
	}
	sortDataPointsByValue(dp)

	require.Len(t, dp, 3)

	require.EqualValues(t, 1, dp[0].Value)
	expectedAttrs := attribute.NewSet(attribute.String("a", "b"))
	require.True(t, expectedAttrs.Equals(&dp[0].Attributes))

	require.EqualValues(t, 2, dp[1].Value)
	expectedAttrs = attribute.NewSet()
	require.True(t, expectedAttrs.Equals(&dp[1].Attributes))

	require.EqualValues(t, 3, dp[2].Value)
	expectedAttrs = attribute.NewSet(attribute.String("c", "d"))
	require.True(t, expectedAttrs.Equals(&dp[2].Attributes))
}

func TestOTelPeriodicStats(t *testing.T) {
	type expectation struct {
		name string
		tags []*promClient.LabelPair
	}

	cwd, err := os.Getwd()
	require.NoError(t, err)

	runTest := func(t *testing.T, prepareFunc func(c *config.Config, m metric.Manager), expected []expectation) {
		container, grpcEndpoint := statsTest.StartOTelCollector(t, metricsPort,
			filepath.Join(cwd, "testdata", "otel-collector-config.yaml"),
		)

		c := config.New()
		c.Set("INSTANCE_ID", "my-instance-id")
		c.Set("OpenTelemetry.enabled", true)
		c.Set("OpenTelemetry.metrics.endpoint", grpcEndpoint)
		c.Set("OpenTelemetry.metrics.exportInterval", time.Millisecond)
		m := metric.NewManager()
		prepareFunc(c, m)

		l := logger.NewFactory(c)
		s := NewStats(c, l, m,
			WithServiceName("TestOTelPeriodicStats"),
			WithServiceVersion("v1.2.3"),
		)

		// start stats
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		require.NoError(t, s.Start(ctx, DefaultGoRoutineFactory))
		defer s.Stop()

		var (
			resp            *http.Response
			metrics         map[string]*promClient.MetricFamily
			metricsEndpoint = fmt.Sprintf("http://localhost:%d/metrics", docker.GetHostPort(t, metricsPort, container))
		)

		require.Eventuallyf(t, func() bool {
			resp, err = http.Get(metricsEndpoint)
			if err != nil {
				return false
			}
			defer func() { httputil.CloseResponse(resp) }()
			metrics, err = statsTest.ParsePrometheusMetrics(resp.Body)
			if err != nil {
				return false
			}
			for _, exp := range expected {
				expectedMetricName := strings.ReplaceAll(exp.name, ".", "_")
				if _, ok := metrics[expectedMetricName]; !ok {
					return false
				}
			}
			return true
		}, 10*time.Second, 100*time.Millisecond, "err: %v, metrics: %+v", err, metrics)

		for _, exp := range expected {
			metricName := strings.ReplaceAll(exp.name, ".", "_")
			require.EqualValues(t, &metricName, metrics[metricName].Name)
			require.EqualValues(t, ptr(promClient.MetricType_GAUGE), metrics[metricName].Type)
			require.Len(t, metrics[metricName].Metric, 1)

			expectedLabels := append(globalDefaultAttrs,
				// the label1=value1 is coming from the otel-collector-config.yaml (see const_labels)
				&promClient.LabelPair{Name: ptr("label1"), Value: ptr("value1")},
				&promClient.LabelPair{Name: ptr("job"), Value: ptr("TestOTelPeriodicStats")},
				&promClient.LabelPair{Name: ptr("service_name"), Value: ptr("TestOTelPeriodicStats")},
			)
			if exp.tags != nil {
				expectedLabels = append(expectedLabels, exp.tags...)
			}
			require.ElementsMatchf(t, expectedLabels, metrics[metricName].Metric[0].Label,
				"Got %+v", metrics[metricName].Metric[0].Label,
			)
		}
	}

	t.Run("CPU stats", func(t *testing.T) {
		runTest(t, func(c *config.Config, m metric.Manager) {
			c.Set("RuntimeStats.enableCPUStats", true)
			c.Set("RuntimeStats.enabledMemStats", false)
			c.Set("RuntimeStats.enableGCStats", false)
		}, []expectation{
			{name: "runtime_cpu.goroutines"},
			{name: "runtime_cpu.cgo_calls"},
		})
	})

	t.Run("Mem stats", func(t *testing.T) {
		runTest(t, func(c *config.Config, m metric.Manager) {
			c.Set("RuntimeStats.enableCPUStats", false)
			c.Set("RuntimeStats.enabledMemStats", true)
			c.Set("RuntimeStats.enableGCStats", false)
		}, []expectation{
			{name: "runtime_mem.alloc"},
			{name: "runtime_mem.total"},
			{name: "runtime_mem.sys"},
			{name: "runtime_mem.lookups"},
			{name: "runtime_mem.malloc"},
			{name: "runtime_mem.frees"},
			{name: "runtime_mem.heap.alloc"},
			{name: "runtime_mem.heap.sys"},
			{name: "runtime_mem.heap.idle"},
			{name: "runtime_mem.heap.inuse"},
			{name: "runtime_mem.heap.released"},
			{name: "runtime_mem.heap.objects"},
			{name: "runtime_mem.stack.inuse"},
			{name: "runtime_mem.stack.sys"},
			{name: "runtime_mem.stack.mspan_inuse"},
			{name: "runtime_mem.stack.mspan_sys"},
			{name: "runtime_mem.stack.mcache_inuse"},
			{name: "runtime_mem.stack.mcache_sys"},
			{name: "runtime_mem.othersys"},
		})
	})

	t.Run("MemGC stats", func(t *testing.T) {
		runTest(t, func(c *config.Config, m metric.Manager) {
			c.Set("RuntimeStats.enableCPUStats", false)
			c.Set("RuntimeStats.enabledMemStats", true)
			c.Set("RuntimeStats.enableGCStats", true)
		}, []expectation{
			{name: "runtime_mem.alloc"},
			{name: "runtime_mem.total"},
			{name: "runtime_mem.sys"},
			{name: "runtime_mem.lookups"},
			{name: "runtime_mem.malloc"},
			{name: "runtime_mem.frees"},
			{name: "runtime_mem.heap.alloc"},
			{name: "runtime_mem.heap.sys"},
			{name: "runtime_mem.heap.idle"},
			{name: "runtime_mem.heap.inuse"},
			{name: "runtime_mem.heap.released"},
			{name: "runtime_mem.heap.objects"},
			{name: "runtime_mem.stack.inuse"},
			{name: "runtime_mem.stack.sys"},
			{name: "runtime_mem.stack.mspan_inuse"},
			{name: "runtime_mem.stack.mspan_sys"},
			{name: "runtime_mem.stack.mcache_inuse"},
			{name: "runtime_mem.stack.mcache_sys"},
			{name: "runtime_mem.othersys"},
			{name: "runtime_mem.gc.sys"},
			{name: "runtime_mem.gc.next"},
			{name: "runtime_mem.gc.last"},
			{name: "runtime_mem.gc.pause_total"},
			{name: "runtime_mem.gc.pause"},
			{name: "runtime_mem.gc.count"},
			{name: "runtime_mem.gc.cpu_percent"},
		})
	})

	t.Run("Pending events", func(t *testing.T) {
		runTest(t, func(c *config.Config, m metric.Manager) {
			c.Set("RuntimeStats.enableCPUStats", false)
			c.Set("RuntimeStats.enabledMemStats", false)
			c.Set("RuntimeStats.enableGCStats", false)
			m.GetRegistry(metric.PublishedMetrics).MustGetGauge(
				TestMeasurement{tablePrefix: "table", workspace: "workspace", destType: "destType"},
			).Set(1.0)
		}, []expectation{
			{name: "test_measurement_table", tags: []*promClient.LabelPair{
				{Name: ptr("destType"), Value: ptr("destType")},
				{Name: ptr("workspaceId"), Value: ptr("workspace")},
			}},
		})
	})
}

func TestOTelExcludedTags(t *testing.T) {
	cwd, err := os.Getwd()
	require.NoError(t, err)
	container, grpcEndpoint := statsTest.StartOTelCollector(t, metricsPort,
		filepath.Join(cwd, "testdata", "otel-collector-config.yaml"),
	)

	c := config.New()
	c.Set("INSTANCE_ID", "my-instance-id")
	c.Set("OpenTelemetry.enabled", true)
	c.Set("OpenTelemetry.metrics.endpoint", grpcEndpoint)
	c.Set("OpenTelemetry.metrics.exportInterval", time.Millisecond)
	c.Set("RuntimeStats.enabled", false)
	c.Set("statsExcludedTags", []string{"workspaceId"})
	l := logger.NewFactory(c)
	m := metric.NewManager()
	s := NewStats(c, l, m, WithServiceName(t.Name()), WithServiceVersion("v1.2.3"))

	// start stats
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, s.Start(ctx, DefaultGoRoutineFactory))
	defer s.Stop()

	metricName := "test_workspaceId"
	s.NewTaggedStat(metricName, CountType, Tags{
		"workspaceId":            "nice-value",
		"should_not_be_filtered": "fancy-value",
	}).Increment()

	metricsEndpoint := fmt.Sprintf("http://localhost:%d/metrics", docker.GetHostPort(t, metricsPort, container))
	metrics := requireMetrics(t, metricsEndpoint, metricName)

	require.EqualValues(t, &metricName, metrics[metricName].Name)
	require.EqualValues(t, ptr(promClient.MetricType_COUNTER), metrics[metricName].Type)
	require.Len(t, metrics[metricName].Metric, 1)
	require.EqualValues(t, &promClient.Counter{Value: ptr(1.0)}, metrics[metricName].Metric[0].Counter)
	require.ElementsMatchf(t, append(globalDefaultAttrs,
		// the label1=value1 is coming from the otel-collector-config.yaml (see const_labels)
		&promClient.LabelPair{Name: ptr("label1"), Value: ptr("value1")},
		&promClient.LabelPair{Name: ptr("should_not_be_filtered"), Value: ptr("fancy-value")},
		&promClient.LabelPair{Name: ptr("job"), Value: ptr("TestOTelExcludedTags")},
		&promClient.LabelPair{Name: ptr("service_name"), Value: ptr("TestOTelExcludedTags")},
	), metrics[metricName].Metric[0].Label, "Got %+v", metrics[metricName].Metric[0].Label)
}

func TestOTelStartStopError(t *testing.T) {
	c := config.New()
	c.Set("OpenTelemetry.enabled", true)
	c.Set("OpenTelemetry.metrics.prometheus.enabled", false)
	l := logger.NewFactory(c)
	m := metric.NewManager()
	s := NewStats(c, l, m)

	ctx := context.Background()
	require.Error(t, s.Start(ctx, DefaultGoRoutineFactory),
		"we should error if no endpoint is provided but stats are enabled",
	)

	done := make(chan struct{})
	go func() {
		s.Stop() // this should not panic/block even if we couldn't start
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(1 * time.Second):
		t.Fatal("timeout waiting for Stop()")
	}
}

func TestOTelMeasurementsConsistency(t *testing.T) {
	type testCase struct {
		name               string
		additionalLabels   []*promClient.LabelPair
		setupMeterProvider func(testing.TB) (Stats, string)
	}
	scenarios := []testCase{
		{
			name: "grpc",
			additionalLabels: append(globalDefaultAttrs,
				// the label1=value1 is coming from the otel-collector-config.yaml (see const_labels)
				&promClient.LabelPair{Name: ptr("label1"), Value: ptr("value1")},
			),
			setupMeterProvider: func(t testing.TB) (Stats, string) {
				cwd, err := os.Getwd()
				require.NoError(t, err)
				container, grpcEndpoint := statsTest.StartOTelCollector(t, metricsPort,
					filepath.Join(cwd, "testdata", "otel-collector-config.yaml"),
				)

				c := config.New()
				c.Set("INSTANCE_ID", "my-instance-id")
				c.Set("OpenTelemetry.enabled", true)
				c.Set("OpenTelemetry.metrics.endpoint", grpcEndpoint)
				c.Set("OpenTelemetry.metrics.exportInterval", time.Millisecond)
				c.Set("RuntimeStats.enabled", false)
				l := logger.NewFactory(c)
				m := metric.NewManager()
				s := NewStats(c, l, m,
					WithServiceName("TestOTelHistogramBuckets"),
					WithServiceVersion("v1.2.3"),
					WithDefaultHistogramBuckets([]float64{10, 20, 30}),
					WithHistogramBuckets("bar", []float64{40, 50, 60}),
				)
				t.Cleanup(s.Stop)

				return s, fmt.Sprintf("http://localhost:%d/metrics", docker.GetHostPort(t, metricsPort, container))
			},
		},
		{
			name:             "prometheus",
			additionalLabels: globalDefaultAttrs,
			setupMeterProvider: func(t testing.TB) (Stats, string) {
				freePort, err := testhelper.GetFreePort()
				require.NoError(t, err)

				c := config.New()
				c.Set("INSTANCE_ID", "my-instance-id")
				c.Set("OpenTelemetry.enabled", true)
				c.Set("OpenTelemetry.metrics.prometheus.enabled", true)
				c.Set("OpenTelemetry.metrics.prometheus.port", freePort)
				c.Set("OpenTelemetry.metrics.exportInterval", time.Millisecond)
				c.Set("RuntimeStats.enabled", false)
				l := logger.NewFactory(c)
				m := metric.NewManager()
				s := NewStats(c, l, m,
					WithServiceName("TestOTelHistogramBuckets"),
					WithServiceVersion("v1.2.3"),
					WithDefaultHistogramBuckets([]float64{10, 20, 30}),
					WithHistogramBuckets("bar", []float64{40, 50, 60}),
				)
				t.Cleanup(s.Stop)

				return s, fmt.Sprintf("http://localhost:%d/metrics", freePort)
			},
		},
	}

	for _, scenario := range scenarios {
		t.Run(scenario.name, func(t *testing.T) {
			s, metricsEndpoint := scenario.setupMeterProvider(t)

			// start stats
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			require.NoError(t, s.Start(ctx, DefaultGoRoutineFactory))
			defer s.Stop()

			s.NewTaggedStat("foo", HistogramType, Tags{"a": "b"}).Observe(20)
			s.NewTaggedStat("bar", HistogramType, Tags{"c": "d"}).Observe(50)
			s.NewTaggedStat("baz", CountType, Tags{"e": "f"}).Count(7)
			s.NewTaggedStat("qux", GaugeType, Tags{"g": "h"}).Gauge(13)
			s.NewTaggedStat("asd", TimerType, Tags{"i": "l"}).SendTiming(20 * time.Second)

			metrics := requireMetrics(t, metricsEndpoint, "foo", "bar", "baz", "qux", "asd")

			require.EqualValues(t, ptr("foo"), metrics["foo"].Name)
			require.EqualValues(t, ptr(promClient.MetricType_HISTOGRAM), metrics["foo"].Type)
			require.Len(t, metrics["foo"].Metric, 1)
			require.EqualValues(t, ptr(uint64(1)), metrics["foo"].Metric[0].Histogram.SampleCount)
			require.EqualValues(t, ptr(20.0), metrics["foo"].Metric[0].Histogram.SampleSum)
			require.EqualValues(t, []*promClient.Bucket{
				{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(10.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(20.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(30.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
			}, metrics["foo"].Metric[0].Histogram.Bucket)
			require.ElementsMatchf(t, append([]*promClient.LabelPair{
				{Name: ptr("a"), Value: ptr("b")},
				{Name: ptr("job"), Value: ptr("TestOTelHistogramBuckets")},
				{Name: ptr("service_name"), Value: ptr("TestOTelHistogramBuckets")},
			}, scenario.additionalLabels...), metrics["foo"].Metric[0].Label, "Got %+v", metrics["foo"].Metric[0].Label)

			require.EqualValues(t, ptr("bar"), metrics["bar"].Name)
			require.EqualValues(t, ptr(promClient.MetricType_HISTOGRAM), metrics["bar"].Type)
			require.Len(t, metrics["bar"].Metric, 1)
			require.EqualValues(t, ptr(uint64(1)), metrics["bar"].Metric[0].Histogram.SampleCount)
			require.EqualValues(t, ptr(50.0), metrics["bar"].Metric[0].Histogram.SampleSum)
			require.EqualValues(t, []*promClient.Bucket{
				{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(40.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(50.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(60.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
			}, metrics["bar"].Metric[0].Histogram.Bucket)
			require.ElementsMatchf(t, append([]*promClient.LabelPair{
				{Name: ptr("c"), Value: ptr("d")},
				{Name: ptr("job"), Value: ptr("TestOTelHistogramBuckets")},
				{Name: ptr("service_name"), Value: ptr("TestOTelHistogramBuckets")},
			}, scenario.additionalLabels...), metrics["bar"].Metric[0].Label, "Got %+v", metrics["bar"].Metric[0].Label)

			require.EqualValues(t, ptr("baz"), metrics["baz"].Name)
			require.EqualValues(t, ptr(promClient.MetricType_COUNTER), metrics["baz"].Type)
			require.Len(t, metrics["baz"].Metric, 1)
			require.EqualValues(t, ptr(7.0), metrics["baz"].Metric[0].Counter.Value)
			require.ElementsMatchf(t, append([]*promClient.LabelPair{
				{Name: ptr("e"), Value: ptr("f")},
				{Name: ptr("job"), Value: ptr("TestOTelHistogramBuckets")},
				{Name: ptr("service_name"), Value: ptr("TestOTelHistogramBuckets")},
			}, scenario.additionalLabels...), metrics["baz"].Metric[0].Label, "Got %+v", metrics["baz"].Metric[0].Label)

			require.EqualValues(t, ptr("qux"), metrics["qux"].Name)
			require.EqualValues(t, ptr(promClient.MetricType_GAUGE), metrics["qux"].Type)
			require.Len(t, metrics["qux"].Metric, 1)
			require.EqualValues(t, ptr(13.0), metrics["qux"].Metric[0].Gauge.Value)
			require.ElementsMatchf(t, append([]*promClient.LabelPair{
				{Name: ptr("g"), Value: ptr("h")},
				{Name: ptr("job"), Value: ptr("TestOTelHistogramBuckets")},
				{Name: ptr("service_name"), Value: ptr("TestOTelHistogramBuckets")},
			}, scenario.additionalLabels...), metrics["qux"].Metric[0].Label, "Got %+v", metrics["qux"].Metric[0].Label)

			require.EqualValues(t, ptr("asd"), metrics["asd"].Name)
			require.EqualValues(t, ptr(promClient.MetricType_HISTOGRAM), metrics["asd"].Type)
			require.Len(t, metrics["asd"].Metric, 1)
			require.EqualValues(t, []*promClient.Bucket{
				{CumulativeCount: ptr(uint64(0)), UpperBound: ptr(10.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(20.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(30.0)},
				{CumulativeCount: ptr(uint64(1)), UpperBound: ptr(math.Inf(1))},
			}, metrics["asd"].Metric[0].Histogram.Bucket)
			require.ElementsMatchf(t, append([]*promClient.LabelPair{
				{Name: ptr("i"), Value: ptr("l")},
				{Name: ptr("job"), Value: ptr("TestOTelHistogramBuckets")},
				{Name: ptr("service_name"), Value: ptr("TestOTelHistogramBuckets")},
			}, scenario.additionalLabels...), metrics["asd"].Metric[0].Label, "Got %+v", metrics["asd"].Metric[0].Label)
		})
	}
}

func TestPrometheusCustomRegistry(t *testing.T) {
	metricName := "foo"
	setup := func(t testing.TB) (prometheus.Registerer, int) {
		freePort, err := testhelper.GetFreePort()
		require.NoError(t, err)

		c := config.New()
		c.Set("INSTANCE_ID", "my-instance-id")
		c.Set("OpenTelemetry.enabled", true)
		c.Set("OpenTelemetry.metrics.prometheus.enabled", true)
		c.Set("OpenTelemetry.metrics.prometheus.port", freePort)
		c.Set("OpenTelemetry.metrics.exportInterval", time.Millisecond)
		c.Set("RuntimeStats.enabled", false)
		l := logger.NewFactory(c)
		m := metric.NewManager()
		r := prometheus.NewRegistry()
		s := NewStats(c, l, m,
			WithServiceName("TestPrometheusCustomRegistry"),
			WithServiceVersion("v1.2.3"),
			WithPrometheusRegistry(r, r),
		)
		require.NoError(t, s.Start(context.Background(), DefaultGoRoutineFactory))
		t.Cleanup(s.Stop)

		s.NewTaggedStat(metricName, CountType, Tags{"a": "b"}).Count(7)

		return r, freePort
	}

	t.Run("http", func(t *testing.T) {
		var (
			err             error
			resp            *http.Response
			metrics         map[string]*promClient.MetricFamily
			_, serverPort   = setup(t)
			metricsEndpoint = fmt.Sprintf("http://localhost:%d/metrics", serverPort)
		)
		require.Eventuallyf(t, func() bool {
			resp, err = http.Get(metricsEndpoint)
			if err != nil {
				return false
			}
			defer func() { httputil.CloseResponse(resp) }()
			metrics, err = statsTest.ParsePrometheusMetrics(resp.Body)
			if err != nil {
				return false
			}
			if _, ok := metrics[metricName]; !ok {
				return false
			}
			return true
		}, 10*time.Second, 100*time.Millisecond, "err: %v, metrics: %+v", err, metrics)

		require.EqualValues(t, &metricName, metrics[metricName].Name)
		require.EqualValues(t, ptr(promClient.MetricType_COUNTER), metrics[metricName].Type)
		require.Len(t, metrics[metricName].Metric, 1)
		require.EqualValues(t, &promClient.Counter{Value: ptr(7.0)}, metrics[metricName].Metric[0].Counter)
		require.ElementsMatchf(t, append(globalDefaultAttrs,
			&promClient.LabelPair{Name: ptr("a"), Value: ptr("b")},
			&promClient.LabelPair{Name: ptr("job"), Value: ptr("TestPrometheusCustomRegistry")},
			&promClient.LabelPair{Name: ptr("service_name"), Value: ptr("TestPrometheusCustomRegistry")},
		), metrics[metricName].Metric[0].Label, "Got %+v", metrics[metricName].Metric[0].Label)
	})

	t.Run("collector", func(t *testing.T) {
		r, _ := setup(t)
		metrics, err := r.(prometheus.Gatherer).Gather()
		require.NoError(t, err)

		var mf *promClient.MetricFamily
		for _, m := range metrics {
			if m.GetName() == metricName {
				mf = m
				break
			}
		}
		require.NotNilf(t, mf, "Metric not found in %+v", metrics)
		require.EqualValues(t, metricName, mf.GetName())
		require.EqualValues(t, promClient.MetricType_COUNTER, mf.GetType())
		require.Len(t, mf.GetMetric(), 1)
		require.ElementsMatch(t, append(globalDefaultAttrs,
			&promClient.LabelPair{Name: ptr("a"), Value: ptr("b")},
			&promClient.LabelPair{Name: ptr("job"), Value: ptr("TestPrometheusCustomRegistry")},
			&promClient.LabelPair{Name: ptr("service_name"), Value: ptr("TestPrometheusCustomRegistry")},
		), mf.GetMetric()[0].GetLabel())
		require.EqualValues(t, ptr(7.0), mf.GetMetric()[0].GetCounter().Value)
	})
}

func TestPrometheusDuplicatedAttributes(t *testing.T) {
	freePort, err := testhelper.GetFreePort()
	require.NoError(t, err)

	c := config.New()
	c.Set("INSTANCE_ID", "my-instance-id")
	c.Set("OpenTelemetry.enabled", true)
	c.Set("OpenTelemetry.metrics.prometheus.enabled", true)
	c.Set("OpenTelemetry.metrics.prometheus.port", freePort)
	c.Set("OpenTelemetry.metrics.exportInterval", time.Millisecond)
	c.Set("RuntimeStats.enabled", false)
	ctrl := gomock.NewController(t)
	loggerSpy := mock_logger.NewMockLogger(ctrl)
	loggerSpy.EXPECT().Infof(gomock.Any(), gomock.Any()).AnyTimes()
	loggerSpy.EXPECT().Warnf(
		"removing tag %q for measurement %q since it is a resource attribute",
		"instanceName", "foo",
	).Times(1)
	loggerFactory := mock_logger.NewMockLogger(ctrl)
	loggerFactory.EXPECT().Child(gomock.Any()).Times(1).Return(loggerSpy)
	l := newLoggerSpyFactory(loggerFactory)
	m := metric.NewManager()
	r := prometheus.NewRegistry()
	s := NewStats(c, l, m,
		WithServiceName(t.Name()),
		WithServiceVersion("v1.2.3"),
		WithPrometheusRegistry(r, r),
	)
	require.NoError(t, s.Start(context.Background(), DefaultGoRoutineFactory))
	t.Cleanup(s.Stop)

	metricName := "foo"
	s.NewTaggedStat(metricName, CountType, Tags{"a": "b", "instanceName": "from-metric"}).Count(7)

	var (
		resp            *http.Response
		metrics         map[string]*promClient.MetricFamily
		metricsEndpoint = fmt.Sprintf("http://localhost:%d/metrics", freePort)
	)
	require.Eventuallyf(t, func() bool {
		resp, err = http.Get(metricsEndpoint)
		if err != nil {
			return false
		}
		defer func() { httputil.CloseResponse(resp) }()
		metrics, err = statsTest.ParsePrometheusMetrics(resp.Body)
		if err != nil {
			return false
		}
		if _, ok := metrics[metricName]; !ok {
			return false
		}
		return true
	}, 10*time.Second, 100*time.Millisecond, "err: %v, metrics: %+v", err, metrics)

	require.EqualValues(t, &metricName, metrics[metricName].Name)
	require.EqualValues(t, ptr(promClient.MetricType_COUNTER), metrics[metricName].Type)
	require.Len(t, metrics[metricName].Metric, 1)
	require.EqualValues(t, &promClient.Counter{Value: ptr(7.0)}, metrics[metricName].Metric[0].Counter)
	require.ElementsMatchf(t, append(globalDefaultAttrs,
		&promClient.LabelPair{Name: ptr("a"), Value: ptr("b")},
		&promClient.LabelPair{Name: ptr("job"), Value: ptr(t.Name())},
		&promClient.LabelPair{Name: ptr("service_name"), Value: ptr(t.Name())},
	), metrics[metricName].Metric[0].Label, "Got %+v", metrics[metricName].Metric[0].Label)
}

func TestNoopTracingNoPanics(t *testing.T) {
	freePort, err := testhelper.GetFreePort()
	require.NoError(t, err)

	conf := config.New()
	conf.Set("INSTANCE_ID", t.Name())
	conf.Set("OpenTelemetry.enabled", true)
	conf.Set("RuntimeStats.enabled", false)
	conf.Set("OpenTelemetry.metrics.prometheus.enabled", true)
	conf.Set("OpenTelemetry.metrics.prometheus.port", freePort)
	l := logger.NewFactory(conf)
	m := metric.NewManager()
	s := NewStats(conf, l, m, WithServiceName(t.Name()))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, s.Start(ctx, DefaultGoRoutineFactory))
	t.Cleanup(s.Stop)

	_, span := s.NewTracer("my-tracer").Start(
		ctx, "my-span", SpanKindServer, SpanWithTimestamp(time.Now()), SpanWithTags(Tags{"foo": "bar"}),
	)
	time.Sleep(123 * time.Millisecond)
	span.End()
}

func TestZipkin(t *testing.T) {
	pool, err := dockertest.NewPool("")
	require.NoError(t, err)

	zipkinContainer, err := zipkin.Setup(pool, t)
	require.NoError(t, err)

	zipkinURL := "http://localhost:" + zipkinContainer.Port + "/api/v2/spans"

	conf := config.New()
	conf.Set("INSTANCE_ID", t.Name())
	conf.Set("OpenTelemetry.enabled", true)
	conf.Set("RuntimeStats.enabled", false)
	conf.Set("OpenTelemetry.traces.endpoint", zipkinURL)
	conf.Set("OpenTelemetry.traces.samplingRate", 1.0)
	conf.Set("OpenTelemetry.traces.withSyncer", true)
	conf.Set("OpenTelemetry.traces.withZipkin", true)
	l := logger.NewFactory(conf)
	m := metric.NewManager()
	s := NewStats(conf, l, m, WithServiceName(t.Name()))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, s.Start(ctx, DefaultGoRoutineFactory))
	t.Cleanup(s.Stop)

	tracer := s.NewTracer("my-tracer")
	require.Equalf(t, tracer, s.NewTracer("my-tracer"), "tracer should be cached")

	_, span := tracer.Start(
		ctx, "my-span", SpanKindServer, SpanWithTimestamp(time.Now()), SpanWithTags(Tags{"foo": "bar"}),
	)
	time.Sleep(123 * time.Millisecond)
	span.End()

	zipkinSpansURL := zipkinURL + "?serviceName=" + t.Name()
	getSpansReq, err := http.NewRequest(http.MethodGet, zipkinSpansURL, nil)
	require.NoError(t, err)

	spansBody := assert.RequireEventuallyStatusCode(t, http.StatusOK, getSpansReq)
	require.Equal(t, `["my-span"]`, spansBody)
}

func TestInvalidInstrument(t *testing.T) {
	newStats := func(t *testing.T, match string) *otelStats {
		ctrl := gomock.NewController(t)
		l := mock_logger.NewMockLogger(ctrl)
		l.EXPECT().Warnf(containsMatcher(match), gomock.Any()).Times(1)

		enabled := atomic.Bool{}
		enabled.Store(true)

		return &otelStats{
			config:    statsConfig{enabled: &enabled},
			meter:     sdkmetric.NewMeterProvider().Meter(""),
			noopMeter: noop.NewMeterProvider().Meter(""),
			logger:    l,
		}
	}

	t.Run("counter", func(t *testing.T) {
		s := newStats(t, "failed to create instrument")
		require.NotPanics(t, func() {
			m := s.getMeasurement("_#@!?", CountType, nil)
			m.Increment()
		})
	})
	t.Run("gauge", func(t *testing.T) {
		s := newStats(t, "failed to create gauge")
		require.NotPanics(t, func() {
			m := s.getMeasurement("_#@!?", GaugeType, nil)
			m.Gauge(123)
		})
	})
	t.Run("timer", func(t *testing.T) {
		s := newStats(t, "failed to create instrument")
		require.NotPanics(t, func() {
			m := s.getMeasurement("_#@!?", TimerType, nil)
			m.SendTiming(123 * time.Millisecond)
		})
	})
	t.Run("histogram", func(t *testing.T) {
		s := newStats(t, "failed to create instrument")
		require.NotPanics(t, func() {
			m := s.getMeasurement("_#@!?", HistogramType, nil)
			m.Observe(123)
		})
	})
}

func getDataPoint[T any](ctx context.Context, t *testing.T, rdr sdkmetric.Reader, name string, idx int) (zero T) {
	t.Helper()
	rm := metricdata.ResourceMetrics{}
	err := rdr.Collect(ctx, &rm)
	require.NoError(t, err)
	require.GreaterOrEqual(t, len(rm.ScopeMetrics), 1)
	require.GreaterOrEqual(t, len(rm.ScopeMetrics[0].Metrics), idx+1)
	require.Equal(t, name, rm.ScopeMetrics[0].Metrics[idx].Name)
	md, ok := rm.ScopeMetrics[0].Metrics[idx].Data.(T)
	require.Truef(t, ok, "Metric data is not of type %T but %T", zero, rm.ScopeMetrics[0].Metrics[idx].Data)
	return md
}

func sortDataPointsByValue[N int64 | float64](dp []metricdata.DataPoint[N]) {
	sort.Slice(dp, func(i, j int) bool {
		return dp[i].Value < dp[j].Value
	})
}

func newAttributesSet(t *testing.T, attrs ...attribute.KeyValue) *attribute.Set {
	t.Helper()
	set := attribute.NewSet(attrs...)
	return &set
}

func newReaderWithMeter(t *testing.T) (sdkmetric.Reader, otelMetric.Meter) {
	t.Helper()
	manualRdr := sdkmetric.NewManualReader()
	meterProvider := sdkmetric.NewMeterProvider(
		sdkmetric.WithResource(resource.NewSchemaless(semconv.ServiceNameKey.String(t.Name()))),
		sdkmetric.WithReader(manualRdr),
	)
	t.Cleanup(func() {
		_ = meterProvider.Shutdown(context.Background())
	})
	return manualRdr, meterProvider.Meter(t.Name())
}

func requireMetrics(
	t *testing.T, metricsEndpoint string, requiredKeys ...string,
) map[string]*promClient.MetricFamily {
	t.Helper()

	var (
		err     error
		resp    *http.Response
		metrics map[string]*promClient.MetricFamily
	)
	require.Eventuallyf(t, func() bool {
		resp, err = http.Get(metricsEndpoint)
		if err != nil {
			return false
		}
		defer func() { httputil.CloseResponse(resp) }()
		metrics, err = statsTest.ParsePrometheusMetrics(resp.Body)
		if err != nil {
			return false
		}
		for _, k := range requiredKeys {
			if _, ok := metrics[k]; !ok {
				return false
			}
		}
		return true
	}, 5*time.Second, 100*time.Millisecond, "err: %v, metrics: %+v", err, metrics)

	return metrics
}

func ptr[T any](v T) *T {
	return &v
}

func atomicBool(b bool) *atomic.Bool { // nolint:unparam
	a := atomic.Bool{}
	a.Store(b)
	return &a
}

type TestMeasurement struct {
	tablePrefix string
	workspace   string
	destType    string
}

func (r TestMeasurement) GetName() string {
	return fmt.Sprintf("test_measurement_%s", r.tablePrefix)
}

func (r TestMeasurement) GetTags() map[string]string {
	return map[string]string{
		"workspaceId": r.workspace,
		"destType":    r.destType,
	}
}

type loggerSpyFactory struct{ spy logger.Logger }

func (f loggerSpyFactory) NewLogger() logger.Logger { return f.spy }

func newLoggerSpyFactory(l logger.Logger) loggerFactory {
	return &loggerSpyFactory{spy: l}
}

type containsMatcher string

func (m containsMatcher) String() string { return string(m) }
func (m containsMatcher) Matches(arg any) bool {
	return strings.Contains(arg.(string), string(m))
}