google.golang.org/grpc@v1.72.2/stats/opentelemetry/internal/testutils/testutils.go (about) 1 /* 2 * Copyright 2024 gRPC authors. 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 // Package testutils contains helpers for OpenTelemetry tests. 18 package testutils 19 20 import ( 21 "context" 22 "fmt" 23 "slices" 24 "testing" 25 "time" 26 27 "go.opentelemetry.io/otel/attribute" 28 "go.opentelemetry.io/otel/sdk/metric" 29 "go.opentelemetry.io/otel/sdk/metric/metricdata" 30 "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" 31 ) 32 33 // Redefine default bounds here to avoid a cyclic dependency with top level 34 // opentelemetry package. Could define once through internal, but would make 35 // external opentelemetry godoc less readable. 36 var ( 37 // DefaultLatencyBounds are the default bounds for latency metrics. 38 DefaultLatencyBounds = []float64{0, 0.00001, 0.00005, 0.0001, 0.0003, 0.0006, 0.0008, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.008, 0.01, 0.013, 0.016, 0.02, 0.025, 0.03, 0.04, 0.05, 0.065, 0.08, 0.1, 0.13, 0.16, 0.2, 0.25, 0.3, 0.4, 0.5, 0.65, 0.8, 1, 2, 5, 10, 20, 50, 100} 39 // DefaultSizeBounds are the default bounds for metrics which record size. 40 DefaultSizeBounds = []float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} 41 ) 42 43 // waitForServerCompletedRPCs waits until the unary and streaming stats.End 44 // calls are finished processing. It does this by waiting for the expected 45 // metric triggered by stats.End to appear through the passed in metrics reader. 46 // 47 // Returns a new gotMetrics map containing the metric data being polled for, or 48 // an error if failed to wait for metric. 49 func waitForServerCompletedRPCs(ctx context.Context, reader metric.Reader, wantMetric metricdata.Metrics) (map[string]metricdata.Metrics, error) { 50 for ; ctx.Err() == nil; <-time.After(time.Millisecond) { 51 rm := &metricdata.ResourceMetrics{} 52 reader.Collect(ctx, rm) 53 gotMetrics := map[string]metricdata.Metrics{} 54 for _, sm := range rm.ScopeMetrics { 55 for _, m := range sm.Metrics { 56 gotMetrics[m.Name] = m 57 } 58 } 59 val, ok := gotMetrics[wantMetric.Name] 60 if !ok { 61 continue 62 } 63 switch data := val.Data.(type) { 64 case metricdata.Histogram[int64]: 65 if len(wantMetric.Data.(metricdata.Histogram[int64]).DataPoints) > len(data.DataPoints) { 66 continue 67 } 68 case metricdata.Histogram[float64]: 69 if len(wantMetric.Data.(metricdata.Histogram[float64]).DataPoints) > len(data.DataPoints) { 70 continue 71 } 72 } 73 return gotMetrics, nil 74 } 75 return nil, fmt.Errorf("error waiting for metric %v: %v", wantMetric, ctx.Err()) 76 } 77 78 // checkDataPointWithinFiveSeconds checks if the metric passed in contains a 79 // histogram with dataPoints that fall within buckets that are <=5. Returns an 80 // error if check fails. 81 func checkDataPointWithinFiveSeconds(metric metricdata.Metrics) error { 82 histo, ok := metric.Data.(metricdata.Histogram[float64]) 83 if !ok { 84 return fmt.Errorf("metric data is not histogram") 85 } 86 for _, dataPoint := range histo.DataPoints { 87 var boundWithFive int 88 for i, bound := range dataPoint.Bounds { 89 if bound >= 5 { 90 boundWithFive = i 91 } 92 } 93 foundPoint := false 94 for i, count := range dataPoint.BucketCounts { 95 if i >= boundWithFive { 96 return fmt.Errorf("data point not found in bucket <=5 seconds") 97 } 98 if count == 1 { 99 foundPoint = true 100 break 101 } 102 } 103 if !foundPoint { 104 return fmt.Errorf("no data point found for metric") 105 } 106 } 107 return nil 108 } 109 110 // MetricDataOptions are the options used to configure the metricData emissions 111 // of expected metrics data from NewMetricData. 112 type MetricDataOptions struct { 113 // CSMLabels are the csm labels to attach to metrics which receive csm 114 // labels (all A66 expect client call and started RPC's client and server 115 // side). 116 CSMLabels []attribute.KeyValue 117 // Target is the target of the client and server. 118 Target string 119 // UnaryCallFailed is whether the Unary Call failed, which would trigger 120 // trailers only. 121 UnaryCallFailed bool 122 // UnaryCompressedMessageSize is the compressed message size of the Unary 123 // RPC. This assumes both client and server sent the same message size. 124 UnaryCompressedMessageSize float64 125 // StreamingCompressedMessageSize is the compressed message size of the 126 // Streaming RPC. This assumes both client and server sent the same message 127 // size. 128 StreamingCompressedMessageSize float64 129 } 130 131 // createBucketCounts creates a list of bucket counts based off the 132 // recordingPoints and bounds. Both recordingPoints and bounds are assumed to be 133 // in order. 134 func createBucketCounts(recordingPoints []float64, bounds []float64) []uint64 { 135 var bucketCounts []uint64 136 var recordingPointIndex int 137 for _, bound := range bounds { 138 var bucketCount uint64 139 if recordingPointIndex >= len(recordingPoints) { 140 bucketCounts = append(bucketCounts, bucketCount) 141 continue 142 } 143 for recordingPoints[recordingPointIndex] <= bound { 144 bucketCount++ 145 recordingPointIndex++ 146 if recordingPointIndex >= len(recordingPoints) { 147 break 148 } 149 } 150 bucketCounts = append(bucketCounts, bucketCount) 151 } 152 // The rest of the recording points are last bound -> infinity. 153 bucketCounts = append(bucketCounts, uint64(len(recordingPoints)-recordingPointIndex)) 154 return bucketCounts 155 } 156 157 // MetricDataUnary returns a list of expected metrics defined in A66 for a 158 // client and server for one unary RPC. 159 func MetricDataUnary(options MetricDataOptions) []metricdata.Metrics { 160 methodAttr := attribute.String("grpc.method", "grpc.testing.TestService/UnaryCall") 161 targetAttr := attribute.String("grpc.target", options.Target) 162 statusAttr := attribute.String("grpc.status", "OK") 163 if options.UnaryCallFailed { 164 statusAttr = attribute.String("grpc.status", "UNKNOWN") 165 } 166 clientSideEnd := []attribute.KeyValue{ 167 methodAttr, 168 targetAttr, 169 statusAttr, 170 } 171 serverSideEnd := []attribute.KeyValue{ 172 methodAttr, 173 statusAttr, 174 } 175 clientSideEnd = append(clientSideEnd, options.CSMLabels...) 176 serverSideEnd = append(serverSideEnd, options.CSMLabels...) 177 compressedBytesSentRecv := int64(options.UnaryCompressedMessageSize) 178 bucketCounts := createBucketCounts([]float64{options.UnaryCompressedMessageSize}, DefaultSizeBounds) 179 extrema := metricdata.NewExtrema(int64(options.UnaryCompressedMessageSize)) 180 return []metricdata.Metrics{ 181 { 182 Name: "grpc.client.attempt.started", 183 Description: "Number of client call attempts started.", 184 Unit: "attempt", 185 Data: metricdata.Sum[int64]{ 186 DataPoints: []metricdata.DataPoint[int64]{ 187 { 188 Attributes: attribute.NewSet(methodAttr, targetAttr), 189 Value: 1, 190 }, 191 }, 192 Temporality: metricdata.CumulativeTemporality, 193 IsMonotonic: true, 194 }, 195 }, 196 { 197 Name: "grpc.client.attempt.duration", 198 Description: "End-to-end time taken to complete a client call attempt.", 199 Unit: "s", 200 Data: metricdata.Histogram[float64]{ 201 DataPoints: []metricdata.HistogramDataPoint[float64]{ 202 { 203 Attributes: attribute.NewSet(clientSideEnd...), 204 Count: 1, 205 Bounds: DefaultLatencyBounds, 206 }, 207 }, 208 Temporality: metricdata.CumulativeTemporality, 209 }, 210 }, 211 { 212 Name: "grpc.client.attempt.sent_total_compressed_message_size", 213 Description: "Compressed message bytes sent per client call attempt.", 214 Unit: "By", 215 Data: metricdata.Histogram[int64]{ 216 DataPoints: []metricdata.HistogramDataPoint[int64]{ 217 { 218 Attributes: attribute.NewSet(clientSideEnd...), 219 Count: 1, 220 Bounds: DefaultSizeBounds, 221 BucketCounts: bucketCounts, 222 Min: extrema, 223 Max: extrema, 224 Sum: compressedBytesSentRecv, 225 }, 226 }, 227 Temporality: metricdata.CumulativeTemporality, 228 }, 229 }, 230 { 231 Name: "grpc.client.attempt.rcvd_total_compressed_message_size", 232 Description: "Compressed message bytes received per call attempt.", 233 Unit: "By", 234 Data: metricdata.Histogram[int64]{ 235 DataPoints: []metricdata.HistogramDataPoint[int64]{ 236 { 237 Attributes: attribute.NewSet(clientSideEnd...), 238 Count: 1, 239 Bounds: DefaultSizeBounds, 240 BucketCounts: bucketCounts, 241 Min: extrema, 242 Max: extrema, 243 Sum: compressedBytesSentRecv, 244 }, 245 }, 246 Temporality: metricdata.CumulativeTemporality, 247 }, 248 }, 249 { 250 Name: "grpc.client.call.duration", 251 Description: "Time taken by gRPC to complete an RPC from application's perspective.", 252 Unit: "s", 253 Data: metricdata.Histogram[float64]{ 254 DataPoints: []metricdata.HistogramDataPoint[float64]{ 255 { 256 Attributes: attribute.NewSet(methodAttr, targetAttr, statusAttr), 257 Count: 1, 258 Bounds: DefaultLatencyBounds, 259 }, 260 }, 261 Temporality: metricdata.CumulativeTemporality, 262 }, 263 }, 264 { 265 Name: "grpc.server.call.started", 266 Description: "Number of server calls started.", 267 Unit: "call", 268 Data: metricdata.Sum[int64]{ 269 DataPoints: []metricdata.DataPoint[int64]{ 270 { 271 Attributes: attribute.NewSet(methodAttr), 272 Value: 1, 273 }, 274 }, 275 Temporality: metricdata.CumulativeTemporality, 276 IsMonotonic: true, 277 }, 278 }, 279 { 280 Name: "grpc.server.call.sent_total_compressed_message_size", 281 Unit: "By", 282 Description: "Compressed message bytes sent per server call.", 283 Data: metricdata.Histogram[int64]{ 284 DataPoints: []metricdata.HistogramDataPoint[int64]{ 285 { 286 Attributes: attribute.NewSet(serverSideEnd...), 287 Count: 1, 288 Bounds: DefaultSizeBounds, 289 BucketCounts: bucketCounts, 290 Min: extrema, 291 Max: extrema, 292 Sum: compressedBytesSentRecv, 293 }, 294 }, 295 Temporality: metricdata.CumulativeTemporality, 296 }, 297 }, 298 { 299 Name: "grpc.server.call.rcvd_total_compressed_message_size", 300 Unit: "By", 301 Description: "Compressed message bytes received per server call.", 302 Data: metricdata.Histogram[int64]{ 303 DataPoints: []metricdata.HistogramDataPoint[int64]{ 304 { 305 Attributes: attribute.NewSet(serverSideEnd...), 306 Count: 1, 307 Bounds: DefaultSizeBounds, 308 BucketCounts: bucketCounts, 309 Min: extrema, 310 Max: extrema, 311 Sum: compressedBytesSentRecv, 312 }, 313 }, 314 Temporality: metricdata.CumulativeTemporality, 315 }, 316 }, 317 { 318 Name: "grpc.server.call.duration", 319 Description: "End-to-end time taken to complete a call from server transport's perspective.", 320 Unit: "s", 321 Data: metricdata.Histogram[float64]{ 322 DataPoints: []metricdata.HistogramDataPoint[float64]{ 323 { 324 Attributes: attribute.NewSet(serverSideEnd...), 325 Count: 1, 326 Bounds: DefaultLatencyBounds, 327 }, 328 }, 329 Temporality: metricdata.CumulativeTemporality, 330 }, 331 }, 332 } 333 } 334 335 // MetricDataStreaming returns a list of expected metrics defined in A66 for a 336 // client and server for one streaming RPC. 337 func MetricDataStreaming(options MetricDataOptions) []metricdata.Metrics { 338 methodAttr := attribute.String("grpc.method", "grpc.testing.TestService/FullDuplexCall") 339 targetAttr := attribute.String("grpc.target", options.Target) 340 statusAttr := attribute.String("grpc.status", "OK") 341 clientSideEnd := []attribute.KeyValue{ 342 methodAttr, 343 targetAttr, 344 statusAttr, 345 } 346 serverSideEnd := []attribute.KeyValue{ 347 methodAttr, 348 statusAttr, 349 } 350 clientSideEnd = append(clientSideEnd, options.CSMLabels...) 351 serverSideEnd = append(serverSideEnd, options.CSMLabels...) 352 compressedBytesSentRecv := int64(options.StreamingCompressedMessageSize) 353 bucketCounts := createBucketCounts([]float64{options.StreamingCompressedMessageSize}, DefaultSizeBounds) 354 extrema := metricdata.NewExtrema(int64(options.StreamingCompressedMessageSize)) 355 return []metricdata.Metrics{ 356 { 357 Name: "grpc.client.attempt.started", 358 Description: "Number of client call attempts started.", 359 Unit: "attempt", 360 Data: metricdata.Sum[int64]{ 361 DataPoints: []metricdata.DataPoint[int64]{ 362 { 363 Attributes: attribute.NewSet(methodAttr, targetAttr), 364 Value: 1, 365 }, 366 }, 367 Temporality: metricdata.CumulativeTemporality, 368 IsMonotonic: true, 369 }, 370 }, 371 { 372 Name: "grpc.client.attempt.duration", 373 Description: "End-to-end time taken to complete a client call attempt.", 374 Unit: "s", 375 Data: metricdata.Histogram[float64]{ 376 DataPoints: []metricdata.HistogramDataPoint[float64]{ 377 { 378 Attributes: attribute.NewSet(clientSideEnd...), 379 Count: 1, 380 Bounds: DefaultLatencyBounds, 381 }, 382 }, 383 Temporality: metricdata.CumulativeTemporality, 384 }, 385 }, 386 { 387 Name: "grpc.client.attempt.sent_total_compressed_message_size", 388 Description: "Compressed message bytes sent per client call attempt.", 389 Unit: "By", 390 Data: metricdata.Histogram[int64]{ 391 DataPoints: []metricdata.HistogramDataPoint[int64]{ 392 { 393 Attributes: attribute.NewSet(clientSideEnd...), 394 Count: 1, 395 Bounds: DefaultSizeBounds, 396 BucketCounts: bucketCounts, 397 Min: extrema, 398 Max: extrema, 399 Sum: compressedBytesSentRecv, 400 }, 401 }, 402 Temporality: metricdata.CumulativeTemporality, 403 }, 404 }, 405 { 406 Name: "grpc.client.attempt.rcvd_total_compressed_message_size", 407 Description: "Compressed message bytes received per call attempt.", 408 Unit: "By", 409 Data: metricdata.Histogram[int64]{ 410 DataPoints: []metricdata.HistogramDataPoint[int64]{ 411 { 412 Attributes: attribute.NewSet(clientSideEnd...), 413 Count: 1, 414 Bounds: DefaultSizeBounds, 415 BucketCounts: bucketCounts, 416 Min: extrema, 417 Max: extrema, 418 Sum: compressedBytesSentRecv, 419 }, 420 }, 421 Temporality: metricdata.CumulativeTemporality, 422 }, 423 }, 424 { 425 Name: "grpc.client.call.duration", 426 Description: "Time taken by gRPC to complete an RPC from application's perspective.", 427 Unit: "s", 428 Data: metricdata.Histogram[float64]{ 429 DataPoints: []metricdata.HistogramDataPoint[float64]{ 430 { 431 Attributes: attribute.NewSet(methodAttr, targetAttr, statusAttr), 432 Count: 1, 433 Bounds: DefaultLatencyBounds, 434 }, 435 }, 436 Temporality: metricdata.CumulativeTemporality, 437 }, 438 }, 439 { 440 Name: "grpc.server.call.started", 441 Description: "Number of server calls started.", 442 Unit: "call", 443 Data: metricdata.Sum[int64]{ 444 DataPoints: []metricdata.DataPoint[int64]{ 445 { 446 Attributes: attribute.NewSet(methodAttr), 447 Value: 1, 448 }, 449 }, 450 Temporality: metricdata.CumulativeTemporality, 451 IsMonotonic: true, 452 }, 453 }, 454 { 455 Name: "grpc.server.call.sent_total_compressed_message_size", 456 Unit: "By", 457 Description: "Compressed message bytes sent per server call.", 458 Data: metricdata.Histogram[int64]{ 459 DataPoints: []metricdata.HistogramDataPoint[int64]{ 460 { 461 Attributes: attribute.NewSet(serverSideEnd...), 462 Count: 1, 463 Bounds: DefaultSizeBounds, 464 BucketCounts: bucketCounts, 465 Min: extrema, 466 Max: extrema, 467 Sum: compressedBytesSentRecv, 468 }, 469 }, 470 Temporality: metricdata.CumulativeTemporality, 471 }, 472 }, 473 { 474 Name: "grpc.server.call.rcvd_total_compressed_message_size", 475 Unit: "By", 476 Description: "Compressed message bytes received per server call.", 477 Data: metricdata.Histogram[int64]{ 478 DataPoints: []metricdata.HistogramDataPoint[int64]{ 479 { 480 Attributes: attribute.NewSet(serverSideEnd...), 481 Count: 1, 482 Bounds: DefaultSizeBounds, 483 BucketCounts: bucketCounts, 484 Min: extrema, 485 Max: extrema, 486 Sum: compressedBytesSentRecv, 487 }, 488 }, 489 Temporality: metricdata.CumulativeTemporality, 490 }, 491 }, 492 { 493 Name: "grpc.server.call.duration", 494 Description: "End-to-end time taken to complete a call from server transport's perspective.", 495 Unit: "s", 496 Data: metricdata.Histogram[float64]{ 497 DataPoints: []metricdata.HistogramDataPoint[float64]{ 498 { 499 Attributes: attribute.NewSet(serverSideEnd...), 500 Count: 1, 501 Bounds: DefaultLatencyBounds, 502 }, 503 }, 504 Temporality: metricdata.CumulativeTemporality, 505 }, 506 }, 507 } 508 } 509 510 // MetricData returns a metricsDataSlice for A66 metrics for client and server 511 // with a unary RPC and streaming RPC with certain compression and message flow 512 // sent. If csmAttributes is set to true, the corresponding CSM Metrics (not 513 // client side call metrics, or started on client and server side). 514 func MetricData(options MetricDataOptions) []metricdata.Metrics { 515 unaryMethodAttr := attribute.String("grpc.method", "grpc.testing.TestService/UnaryCall") 516 duplexMethodAttr := attribute.String("grpc.method", "grpc.testing.TestService/FullDuplexCall") 517 targetAttr := attribute.String("grpc.target", options.Target) 518 unaryStatusAttr := attribute.String("grpc.status", "OK") 519 streamingStatusAttr := attribute.String("grpc.status", "OK") 520 if options.UnaryCallFailed { 521 unaryStatusAttr = attribute.String("grpc.status", "UNKNOWN") 522 } 523 unaryMethodClientSideEnd := []attribute.KeyValue{ 524 unaryMethodAttr, 525 targetAttr, 526 unaryStatusAttr, 527 } 528 streamingMethodClientSideEnd := []attribute.KeyValue{ 529 duplexMethodAttr, 530 targetAttr, 531 streamingStatusAttr, 532 } 533 unaryMethodServerSideEnd := []attribute.KeyValue{ 534 unaryMethodAttr, 535 unaryStatusAttr, 536 } 537 streamingMethodServerSideEnd := []attribute.KeyValue{ 538 duplexMethodAttr, 539 streamingStatusAttr, 540 } 541 542 unaryMethodClientSideEnd = append(unaryMethodClientSideEnd, options.CSMLabels...) 543 streamingMethodClientSideEnd = append(streamingMethodClientSideEnd, options.CSMLabels...) 544 unaryMethodServerSideEnd = append(unaryMethodServerSideEnd, options.CSMLabels...) 545 streamingMethodServerSideEnd = append(streamingMethodServerSideEnd, options.CSMLabels...) 546 unaryCompressedBytesSentRecv := int64(options.UnaryCompressedMessageSize) 547 unaryBucketCounts := createBucketCounts([]float64{options.UnaryCompressedMessageSize}, DefaultSizeBounds) 548 unaryExtrema := metricdata.NewExtrema(int64(options.UnaryCompressedMessageSize)) 549 550 streamingCompressedBytesSentRecv := int64(options.StreamingCompressedMessageSize) 551 streamingBucketCounts := createBucketCounts([]float64{options.StreamingCompressedMessageSize}, DefaultSizeBounds) 552 streamingExtrema := metricdata.NewExtrema(int64(options.StreamingCompressedMessageSize)) 553 554 return []metricdata.Metrics{ 555 { 556 Name: "grpc.client.attempt.started", 557 Description: "Number of client call attempts started.", 558 Unit: "attempt", 559 Data: metricdata.Sum[int64]{ 560 DataPoints: []metricdata.DataPoint[int64]{ 561 { 562 Attributes: attribute.NewSet(unaryMethodAttr, targetAttr), 563 Value: 1, 564 }, 565 { 566 Attributes: attribute.NewSet(duplexMethodAttr, targetAttr), 567 Value: 1, 568 }, 569 }, 570 Temporality: metricdata.CumulativeTemporality, 571 IsMonotonic: true, 572 }, 573 }, 574 { 575 Name: "grpc.client.attempt.duration", 576 Description: "End-to-end time taken to complete a client call attempt.", 577 Unit: "s", 578 Data: metricdata.Histogram[float64]{ 579 DataPoints: []metricdata.HistogramDataPoint[float64]{ 580 { 581 Attributes: attribute.NewSet(unaryMethodClientSideEnd...), 582 Count: 1, 583 Bounds: DefaultLatencyBounds, 584 }, 585 { 586 Attributes: attribute.NewSet(streamingMethodClientSideEnd...), 587 Count: 1, 588 Bounds: DefaultLatencyBounds, 589 }, 590 }, 591 Temporality: metricdata.CumulativeTemporality, 592 }, 593 }, 594 { 595 Name: "grpc.client.attempt.sent_total_compressed_message_size", 596 Description: "Compressed message bytes sent per client call attempt.", 597 Unit: "By", 598 Data: metricdata.Histogram[int64]{ 599 DataPoints: []metricdata.HistogramDataPoint[int64]{ 600 { 601 Attributes: attribute.NewSet(unaryMethodClientSideEnd...), 602 Count: 1, 603 Bounds: DefaultSizeBounds, 604 BucketCounts: unaryBucketCounts, 605 Min: unaryExtrema, 606 Max: unaryExtrema, 607 Sum: unaryCompressedBytesSentRecv, 608 }, 609 { 610 Attributes: attribute.NewSet(streamingMethodClientSideEnd...), 611 Count: 1, 612 Bounds: DefaultSizeBounds, 613 BucketCounts: streamingBucketCounts, 614 Min: streamingExtrema, 615 Max: streamingExtrema, 616 Sum: streamingCompressedBytesSentRecv, 617 }, 618 }, 619 Temporality: metricdata.CumulativeTemporality, 620 }, 621 }, 622 { 623 Name: "grpc.client.attempt.rcvd_total_compressed_message_size", 624 Description: "Compressed message bytes received per call attempt.", 625 Unit: "By", 626 Data: metricdata.Histogram[int64]{ 627 DataPoints: []metricdata.HistogramDataPoint[int64]{ 628 { 629 Attributes: attribute.NewSet(unaryMethodClientSideEnd...), 630 Count: 1, 631 Bounds: DefaultSizeBounds, 632 BucketCounts: unaryBucketCounts, 633 Min: unaryExtrema, 634 Max: unaryExtrema, 635 Sum: unaryCompressedBytesSentRecv, 636 }, 637 { 638 Attributes: attribute.NewSet(streamingMethodClientSideEnd...), 639 Count: 1, 640 Bounds: DefaultSizeBounds, 641 BucketCounts: streamingBucketCounts, 642 Min: streamingExtrema, 643 Max: streamingExtrema, 644 Sum: streamingCompressedBytesSentRecv, 645 }, 646 }, 647 Temporality: metricdata.CumulativeTemporality, 648 }, 649 }, 650 { 651 Name: "grpc.client.call.duration", 652 Description: "Time taken by gRPC to complete an RPC from application's perspective.", 653 Unit: "s", 654 Data: metricdata.Histogram[float64]{ 655 DataPoints: []metricdata.HistogramDataPoint[float64]{ 656 { 657 Attributes: attribute.NewSet(unaryMethodAttr, targetAttr, unaryStatusAttr), 658 Count: 1, 659 Bounds: DefaultLatencyBounds, 660 }, 661 { 662 Attributes: attribute.NewSet(duplexMethodAttr, targetAttr, streamingStatusAttr), 663 Count: 1, 664 Bounds: DefaultLatencyBounds, 665 }, 666 }, 667 Temporality: metricdata.CumulativeTemporality, 668 }, 669 }, 670 { 671 Name: "grpc.server.call.started", 672 Description: "Number of server calls started.", 673 Unit: "call", 674 Data: metricdata.Sum[int64]{ 675 DataPoints: []metricdata.DataPoint[int64]{ 676 { 677 Attributes: attribute.NewSet(unaryMethodAttr), 678 Value: 1, 679 }, 680 { 681 Attributes: attribute.NewSet(duplexMethodAttr), 682 Value: 1, 683 }, 684 }, 685 Temporality: metricdata.CumulativeTemporality, 686 IsMonotonic: true, 687 }, 688 }, 689 { 690 Name: "grpc.server.call.sent_total_compressed_message_size", 691 Unit: "By", 692 Description: "Compressed message bytes sent per server call.", 693 Data: metricdata.Histogram[int64]{ 694 DataPoints: []metricdata.HistogramDataPoint[int64]{ 695 { 696 Attributes: attribute.NewSet(unaryMethodServerSideEnd...), 697 Count: 1, 698 Bounds: DefaultSizeBounds, 699 BucketCounts: unaryBucketCounts, 700 Min: unaryExtrema, 701 Max: unaryExtrema, 702 Sum: unaryCompressedBytesSentRecv, 703 }, 704 { 705 Attributes: attribute.NewSet(streamingMethodServerSideEnd...), 706 Count: 1, 707 Bounds: DefaultSizeBounds, 708 BucketCounts: streamingBucketCounts, 709 Min: streamingExtrema, 710 Max: streamingExtrema, 711 Sum: streamingCompressedBytesSentRecv, 712 }, 713 }, 714 Temporality: metricdata.CumulativeTemporality, 715 }, 716 }, 717 { 718 Name: "grpc.server.call.rcvd_total_compressed_message_size", 719 Unit: "By", 720 Description: "Compressed message bytes received per server call.", 721 Data: metricdata.Histogram[int64]{ 722 DataPoints: []metricdata.HistogramDataPoint[int64]{ 723 { 724 Attributes: attribute.NewSet(unaryMethodServerSideEnd...), 725 Count: 1, 726 Bounds: DefaultSizeBounds, 727 BucketCounts: unaryBucketCounts, 728 Min: unaryExtrema, 729 Max: unaryExtrema, 730 Sum: unaryCompressedBytesSentRecv, 731 }, 732 { 733 Attributes: attribute.NewSet(streamingMethodServerSideEnd...), 734 Count: 1, 735 Bounds: DefaultSizeBounds, 736 BucketCounts: streamingBucketCounts, 737 Min: streamingExtrema, 738 Max: streamingExtrema, 739 Sum: streamingCompressedBytesSentRecv, 740 }, 741 }, 742 Temporality: metricdata.CumulativeTemporality, 743 }, 744 }, 745 { 746 Name: "grpc.server.call.duration", 747 Description: "End-to-end time taken to complete a call from server transport's perspective.", 748 Unit: "s", 749 Data: metricdata.Histogram[float64]{ 750 DataPoints: []metricdata.HistogramDataPoint[float64]{ 751 { 752 Attributes: attribute.NewSet(unaryMethodServerSideEnd...), 753 Count: 1, 754 Bounds: DefaultLatencyBounds, 755 }, 756 { 757 Attributes: attribute.NewSet(streamingMethodServerSideEnd...), 758 Count: 1, 759 Bounds: DefaultLatencyBounds, 760 }, 761 }, 762 Temporality: metricdata.CumulativeTemporality, 763 }, 764 }, 765 } 766 } 767 768 // CompareMetrics asserts wantMetrics are what we expect. For duration metrics 769 // makes sure the data point is within possible testing time (five seconds from 770 // context timeout). 771 func CompareMetrics(t *testing.T, gotMetrics map[string]metricdata.Metrics, wantMetrics []metricdata.Metrics) { 772 for _, metric := range wantMetrics { 773 val, ok := gotMetrics[metric.Name] 774 if !ok { 775 t.Errorf("Metric %v not present in recorded metrics", metric.Name) 776 continue 777 } 778 779 if metric.Name == "grpc.server.call.sent_total_compressed_message_size" || metric.Name == "grpc.server.call.rcvd_total_compressed_message_size" { 780 val := gotMetrics[metric.Name] 781 if !metricdatatest.AssertEqual(t, metric, val, metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreExemplars()) { 782 t.Errorf("Metrics data type not equal for metric: %v", metric.Name) 783 } 784 continue 785 } 786 787 // If one of the duration metrics, ignore the bucket counts, and make 788 // sure it count falls within a bucket <= 5 seconds (maximum duration of 789 // test due to context). 790 if metric.Name == "grpc.client.attempt.duration" || metric.Name == "grpc.client.call.duration" || metric.Name == "grpc.server.call.duration" { 791 val := gotMetrics[metric.Name] 792 if !metricdatatest.AssertEqual(t, metric, val, metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreExemplars(), metricdatatest.IgnoreValue()) { 793 t.Errorf("Metrics data type not equal for metric: %v", metric.Name) 794 } 795 if err := checkDataPointWithinFiveSeconds(val); err != nil { 796 t.Errorf("Data point not within five seconds for metric %v: %v", metric.Name, err) 797 } 798 continue 799 } 800 801 if !metricdatatest.AssertEqual(t, metric, val, metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreExemplars()) { 802 t.Errorf("Metrics data type not equal for metric: %v", metric.Name) 803 } 804 } 805 } 806 807 // WaitForServerMetrics waits for eventual server metrics (not emitted 808 // synchronously with client side rpc returning). 809 func WaitForServerMetrics(ctx context.Context, t *testing.T, mr *metric.ManualReader, gotMetrics map[string]metricdata.Metrics, wantMetrics []metricdata.Metrics) map[string]metricdata.Metrics { 810 terminalMetrics := []string{ 811 "grpc.server.call.sent_total_compressed_message_size", 812 "grpc.server.call.rcvd_total_compressed_message_size", 813 "grpc.client.attempt.duration", 814 "grpc.client.call.duration", 815 "grpc.server.call.duration", 816 } 817 for _, metric := range wantMetrics { 818 if !slices.Contains(terminalMetrics, metric.Name) { 819 continue 820 } 821 // Sync the metric reader to see the event because stats.End is 822 // handled async server side. Thus, poll until metrics created from 823 // stats.End show up. 824 var err error 825 if gotMetrics, err = waitForServerCompletedRPCs(ctx, mr, metric); err != nil { // move to shared helper 826 t.Fatal(err) 827 } 828 } 829 830 return gotMetrics 831 }