github.com/m3db/m3@v1.5.0/src/cmd/services/m3coordinator/downsample/downsampler_test.go (about) 1 // Copyright (c) 2018 Uber Technologies, Inc. 2 // 3 // Permission is hereby granted, free of charge, to any person obtaining a copy 4 // of this software and associated documentation files (the "Software"), to deal 5 // in the Software without restriction, including without limitation the rights 6 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 // copies of the Software, and to permit persons to whom the Software is 8 // furnished to do so, subject to the following conditions: 9 // 10 // The above copyright notice and this permission notice shall be included in 11 // all copies or substantial portions of the Software. 12 // 13 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 // THE SOFTWARE. 20 21 //nolint: dupl 22 package downsample 23 24 import ( 25 "bytes" 26 "fmt" 27 "os" 28 "strings" 29 "testing" 30 "time" 31 32 "github.com/m3db/m3/src/aggregator/client" 33 clusterclient "github.com/m3db/m3/src/cluster/client" 34 "github.com/m3db/m3/src/cluster/kv" 35 "github.com/m3db/m3/src/cluster/kv/mem" 36 dbclient "github.com/m3db/m3/src/dbnode/client" 37 "github.com/m3db/m3/src/metrics/aggregation" 38 "github.com/m3db/m3/src/metrics/generated/proto/metricpb" 39 "github.com/m3db/m3/src/metrics/generated/proto/rulepb" 40 "github.com/m3db/m3/src/metrics/matcher" 41 "github.com/m3db/m3/src/metrics/metadata" 42 "github.com/m3db/m3/src/metrics/metric/id" 43 "github.com/m3db/m3/src/metrics/metric/unaggregated" 44 "github.com/m3db/m3/src/metrics/policy" 45 "github.com/m3db/m3/src/metrics/rules" 46 ruleskv "github.com/m3db/m3/src/metrics/rules/store/kv" 47 "github.com/m3db/m3/src/metrics/rules/view" 48 "github.com/m3db/m3/src/metrics/transformation" 49 "github.com/m3db/m3/src/query/models" 50 "github.com/m3db/m3/src/query/storage" 51 "github.com/m3db/m3/src/query/storage/m3" 52 "github.com/m3db/m3/src/query/storage/m3/storagemetadata" 53 "github.com/m3db/m3/src/query/storage/mock" 54 "github.com/m3db/m3/src/query/ts" 55 "github.com/m3db/m3/src/x/clock" 56 "github.com/m3db/m3/src/x/ident" 57 "github.com/m3db/m3/src/x/instrument" 58 xio "github.com/m3db/m3/src/x/io" 59 "github.com/m3db/m3/src/x/pool" 60 "github.com/m3db/m3/src/x/serialize" 61 xtest "github.com/m3db/m3/src/x/test" 62 xtime "github.com/m3db/m3/src/x/time" 63 64 "github.com/golang/mock/gomock" 65 "github.com/stretchr/testify/assert" 66 "github.com/stretchr/testify/require" 67 "go.uber.org/zap" 68 ) 69 70 var ( 71 testAggregationType = aggregation.Sum 72 testAggregationStoragePolicies = []policy.StoragePolicy{ 73 policy.MustParseStoragePolicy("2s:1d"), 74 } 75 ) 76 77 const ( 78 nameTag = "__name__" 79 ) 80 81 func TestDownsamplerAggregationWithAutoMappingRulesFromNamespacesWatcher(t *testing.T) { 82 t.Parallel() 83 84 ctrl := xtest.NewController(t) 85 defer ctrl.Finish() 86 87 gaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{}) 88 require.Equal(t, 1, len(gaugeMetrics)) 89 90 gaugeMetric := gaugeMetrics[0] 91 numSamples := len(gaugeMetric.samples) 92 93 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 94 ingest: &testDownsamplerOptionsIngest{ 95 gaugeMetrics: gaugeMetrics, 96 }, 97 expect: &testDownsamplerOptionsExpect{ 98 writes: []testExpectedWrite{ 99 { 100 tags: gaugeMetric.tags, 101 // NB(nate): Automapping rules generated from cluster namespaces currently 102 // hardcode 'Last' as the aggregation type. As such, expect value to be the last value 103 // in the sample. 104 values: []expectedValue{{value: gaugeMetric.samples[numSamples-1]}}, 105 }, 106 }, 107 }, 108 }) 109 110 require.False(t, testDownsampler.downsampler.Enabled()) 111 112 origStagedMetadata := originalStagedMetadata(t, testDownsampler) 113 114 session := dbclient.NewMockSession(ctrl) 115 setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{ 116 NamespaceID: ident.StringID("2s:1d"), 117 Resolution: 2 * time.Second, 118 Retention: 24 * time.Hour, 119 Session: session, 120 }) 121 122 waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata) 123 124 require.True(t, testDownsampler.downsampler.Enabled()) 125 126 // Test expected output 127 testDownsamplerAggregation(t, testDownsampler) 128 } 129 130 func TestDownsamplerAggregationDownsamplesRawMetricWithRollupRule(t *testing.T) { 131 t.Parallel() 132 133 gaugeMetric := testGaugeMetric{ 134 tags: map[string]string{ 135 nameTag: "http_requests", 136 "app": "nginx_edge", 137 "status_code": "500", 138 "endpoint": "/foo/bar", 139 "not_rolled_up": "not_rolled_up_value", 140 }, 141 timedSamples: []testGaugeMetricTimedSample{ 142 {value: 42}, 143 {value: 64, offset: 1 * time.Second}, 144 }, 145 } 146 res := 1 * time.Second 147 ret := 30 * 24 * time.Hour 148 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 149 rulesConfig: &RulesConfiguration{ 150 RollupRules: []RollupRuleConfiguration{ 151 { 152 Filter: fmt.Sprintf( 153 "%s:http_requests app:* status_code:* endpoint:*", 154 nameTag), 155 Transforms: []TransformConfiguration{ 156 { 157 Transform: &TransformOperationConfiguration{ 158 Type: transformation.PerSecond, 159 }, 160 }, 161 { 162 Rollup: &RollupOperationConfiguration{ 163 MetricName: "http_requests_by_status_code", 164 GroupBy: []string{"app", "status_code", "endpoint"}, 165 Aggregations: []aggregation.Type{aggregation.Sum}, 166 }, 167 }, 168 }, 169 StoragePolicies: []StoragePolicyConfiguration{ 170 { 171 Resolution: res, 172 Retention: ret, 173 }, 174 }, 175 }, 176 }, 177 }, 178 ingest: &testDownsamplerOptionsIngest{ 179 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 180 }, 181 expect: &testDownsamplerOptionsExpect{ 182 writes: []testExpectedWrite{ 183 // aggregated rollup metric 184 { 185 tags: map[string]string{ 186 nameTag: "http_requests_by_status_code", 187 string(rollupTagName): string(rollupTagValue), 188 "app": "nginx_edge", 189 "status_code": "500", 190 "endpoint": "/foo/bar", 191 }, 192 values: []expectedValue{{value: 22}}, 193 attributes: &storagemetadata.Attributes{ 194 MetricsType: storagemetadata.AggregatedMetricsType, 195 Resolution: res, 196 Retention: ret, 197 }, 198 }, 199 // raw aggregated metric 200 { 201 tags: gaugeMetric.tags, 202 values: []expectedValue{{value: 42}, {value: 64}}, 203 }, 204 }, 205 }, 206 }) 207 208 // Setup auto-mapping rules. 209 require.False(t, testDownsampler.downsampler.Enabled()) 210 origStagedMetadata := originalStagedMetadata(t, testDownsampler) 211 ctrl := xtest.NewController(t) 212 defer ctrl.Finish() 213 session := dbclient.NewMockSession(ctrl) 214 setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{ 215 NamespaceID: ident.StringID("1s:30d"), 216 Resolution: res, 217 Retention: ret, 218 Session: session, 219 }) 220 waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata) 221 require.True(t, testDownsampler.downsampler.Enabled()) 222 223 // Test expected output 224 testDownsamplerAggregation(t, testDownsampler) 225 } 226 227 func TestDownsamplerEmptyGroupBy(t *testing.T) { 228 t.Parallel() 229 230 requestMetric := testGaugeMetric{ 231 tags: map[string]string{ 232 nameTag: "http_requests", 233 }, 234 timedSamples: []testGaugeMetricTimedSample{ 235 {value: 42}, 236 {value: 64, offset: 1 * time.Second}, 237 }, 238 } 239 errorMetric := testGaugeMetric{ 240 tags: map[string]string{ 241 nameTag: "http_errors", 242 }, 243 timedSamples: []testGaugeMetricTimedSample{ 244 {value: 43}, 245 {value: 65, offset: 1 * time.Second}, 246 }, 247 } 248 res := 1 * time.Second 249 ret := 30 * 24 * time.Hour 250 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 251 rulesConfig: &RulesConfiguration{ 252 RollupRules: []RollupRuleConfiguration{ 253 { 254 Filter: fmt.Sprintf("%s:http_*", nameTag), 255 Transforms: []TransformConfiguration{ 256 { 257 Transform: &TransformOperationConfiguration{ 258 Type: transformation.PerSecond, 259 }, 260 }, 261 { 262 Rollup: &RollupOperationConfiguration{ 263 MetricName: "http_all", 264 GroupBy: []string{}, 265 Aggregations: []aggregation.Type{aggregation.Sum}, 266 }, 267 }, 268 }, 269 StoragePolicies: []StoragePolicyConfiguration{ 270 { 271 Resolution: res, 272 Retention: ret, 273 }, 274 }, 275 }, 276 }, 277 }, 278 ingest: &testDownsamplerOptionsIngest{ 279 gaugeMetrics: []testGaugeMetric{requestMetric, errorMetric}, 280 }, 281 expect: &testDownsamplerOptionsExpect{ 282 writes: []testExpectedWrite{ 283 // aggregated rollup metric 284 { 285 tags: map[string]string{ 286 nameTag: "http_all", 287 string(rollupTagName): string(rollupTagValue), 288 }, 289 values: []expectedValue{{value: 22 * 2}}, 290 attributes: &storagemetadata.Attributes{ 291 MetricsType: storagemetadata.AggregatedMetricsType, 292 Resolution: res, 293 Retention: ret, 294 }, 295 }, 296 // raw aggregated metric 297 { 298 tags: requestMetric.tags, 299 values: []expectedValue{{value: 42}, {value: 64}}, 300 }, 301 { 302 tags: errorMetric.tags, 303 values: []expectedValue{{value: 43}, {value: 65}}, 304 }, 305 }, 306 }, 307 }) 308 309 // Setup auto-mapping rules. 310 require.False(t, testDownsampler.downsampler.Enabled()) 311 origStagedMetadata := originalStagedMetadata(t, testDownsampler) 312 ctrl := xtest.NewController(t) 313 defer ctrl.Finish() 314 session := dbclient.NewMockSession(ctrl) 315 setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{ 316 NamespaceID: ident.StringID("1s:30d"), 317 Resolution: res, 318 Retention: ret, 319 Session: session, 320 }) 321 waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata) 322 require.True(t, testDownsampler.downsampler.Enabled()) 323 324 // Test expected output 325 testDownsamplerAggregation(t, testDownsampler) 326 } 327 328 func TestDownsamplerAggregationDoesNotDownsampleRawMetricWithRollupRulesWithoutRollup(t *testing.T) { 329 t.Parallel() 330 331 gaugeMetric := testGaugeMetric{ 332 tags: map[string]string{ 333 nameTag: "http_requests", 334 "app": "nginx_edge", 335 "status_code": "500", 336 "endpoint": "/foo/bar", 337 }, 338 timedSamples: []testGaugeMetricTimedSample{ 339 {value: 42}, 340 {value: 64, offset: 1 * time.Second}, 341 }, 342 } 343 res := 1 * time.Second 344 ret := 30 * 24 * time.Hour 345 346 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 347 rulesConfig: &RulesConfiguration{ 348 RollupRules: []RollupRuleConfiguration{ 349 { 350 Filter: fmt.Sprintf( 351 "%s:http_requests app:* status_code:* endpoint:*", 352 nameTag), 353 Transforms: []TransformConfiguration{ 354 { 355 Aggregate: &AggregateOperationConfiguration{ 356 Type: aggregation.Sum, 357 }, 358 }, 359 { 360 Transform: &TransformOperationConfiguration{ 361 Type: transformation.Add, 362 }, 363 }, 364 }, 365 StoragePolicies: []StoragePolicyConfiguration{ 366 { 367 Resolution: res, 368 Retention: ret, 369 }, 370 }, 371 }, 372 }, 373 }, 374 ingest: &testDownsamplerOptionsIngest{ 375 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 376 }, 377 expect: &testDownsamplerOptionsExpect{ 378 writes: []testExpectedWrite{ 379 // mapped metric 380 { 381 tags: map[string]string{ 382 nameTag: "http_requests", 383 "app": "nginx_edge", 384 "status_code": "500", 385 "endpoint": "/foo/bar", 386 }, 387 values: []expectedValue{{value: 42}, {value: 106, offset: 1 * time.Second}}, 388 attributes: &storagemetadata.Attributes{ 389 MetricsType: storagemetadata.AggregatedMetricsType, 390 Resolution: res, 391 Retention: ret, 392 }, 393 }, 394 }, 395 }, 396 }) 397 398 // Setup auto-mapping rules. 399 require.False(t, testDownsampler.downsampler.Enabled()) 400 origStagedMetadata := originalStagedMetadata(t, testDownsampler) 401 ctrl := xtest.NewController(t) 402 defer ctrl.Finish() 403 session := dbclient.NewMockSession(ctrl) 404 setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{ 405 NamespaceID: ident.StringID("1s:30d"), 406 Resolution: res, 407 Retention: ret, 408 Session: session, 409 }) 410 waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata) 411 require.True(t, testDownsampler.downsampler.Enabled()) 412 413 // Test expected output 414 testDownsamplerAggregation(t, testDownsampler) 415 } 416 417 func TestDownsamplerAggregationToggleEnabled(t *testing.T) { 418 t.Parallel() 419 420 ctrl := xtest.NewController(t) 421 defer ctrl.Finish() 422 423 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{}) 424 425 require.False(t, testDownsampler.downsampler.Enabled()) 426 427 // Add an aggregated namespace and expect downsampler to be enabled. 428 session := dbclient.NewMockSession(ctrl) 429 setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{ 430 NamespaceID: ident.StringID("2s:1d"), 431 Resolution: 2 * time.Second, 432 Retention: 24 * time.Hour, 433 Session: session, 434 }) 435 waitForEnabledUpdate(t, &testDownsampler, false) 436 437 require.True(t, testDownsampler.downsampler.Enabled()) 438 439 // Set just an unaggregated namespace and expect downsampler to be disabled. 440 clusters, err := m3.NewClusters(m3.UnaggregatedClusterNamespaceDefinition{ 441 NamespaceID: ident.StringID("default"), 442 Retention: 48 * time.Hour, 443 Session: session, 444 }) 445 require.NoError(t, err) 446 require.NoError(t, 447 testDownsampler.opts.ClusterNamespacesWatcher.Update(clusters.ClusterNamespaces())) 448 449 waitForEnabledUpdate(t, &testDownsampler, true) 450 451 require.False(t, testDownsampler.downsampler.Enabled()) 452 } 453 454 func TestDownsamplerAggregationWithRulesStore(t *testing.T) { 455 t.Parallel() 456 457 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{}) 458 rulesStore := testDownsampler.rulesStore 459 460 // Create rules 461 nss, err := rulesStore.ReadNamespaces() 462 require.NoError(t, err) 463 _, err = nss.AddNamespace("default", testUpdateMetadata()) 464 require.NoError(t, err) 465 466 rule := view.MappingRule{ 467 ID: "mappingrule", 468 Name: "mappingrule", 469 Filter: "app:test*", 470 AggregationID: aggregation.MustCompressTypes(testAggregationType), 471 StoragePolicies: testAggregationStoragePolicies, 472 } 473 474 rs := rules.NewEmptyRuleSet("default", testUpdateMetadata()) 475 _, err = rs.AddMappingRule(rule, testUpdateMetadata()) 476 require.NoError(t, err) 477 478 err = rulesStore.WriteAll(nss, rs) 479 require.NoError(t, err) 480 481 logger := testDownsampler.instrumentOpts.Logger(). 482 With(zap.String("test", t.Name())) 483 484 // Wait for mapping rule to appear 485 logger.Info("waiting for mapping rules to propagate") 486 appender, err := testDownsampler.downsampler.NewMetricsAppender() 487 require.NoError(t, err) 488 appenderImpl := appender.(*metricsAppender) 489 testMatchID := newTestID(t, map[string]string{ 490 "__name__": "foo", 491 "app": "test123", 492 }) 493 for { 494 now := time.Now().UnixNano() 495 res, err := appenderImpl.matcher.ForwardMatch(testMatchID, now, now+1, rules.MatchOptions{ 496 NameAndTagsFn: appenderImpl.nameTagFn, 497 SortedTagIteratorFn: appenderImpl.tagIterFn, 498 }) 499 require.NoError(t, err) 500 results := res.ForExistingIDAt(now) 501 if !results.IsDefault() { 502 break 503 } 504 time.Sleep(100 * time.Millisecond) 505 } 506 507 // Test expected output 508 testDownsamplerAggregation(t, testDownsampler) 509 } 510 511 func TestDownsamplerAggregationWithRulesConfigMappingRules(t *testing.T) { 512 t.Parallel() 513 514 gaugeMetric := testGaugeMetric{ 515 tags: map[string]string{ 516 nameTag: "foo_metric", 517 "app": "nginx_edge", 518 }, 519 timedSamples: []testGaugeMetricTimedSample{ 520 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 521 }, 522 } 523 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 524 rulesConfig: &RulesConfiguration{ 525 MappingRules: []MappingRuleConfiguration{ 526 { 527 Filter: "app:nginx*", 528 Aggregations: []aggregation.Type{aggregation.Max}, 529 StoragePolicies: []StoragePolicyConfiguration{ 530 { 531 Resolution: 1 * time.Second, 532 Retention: 30 * 24 * time.Hour, 533 }, 534 }, 535 }, 536 }, 537 }, 538 ingest: &testDownsamplerOptionsIngest{ 539 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 540 }, 541 expect: &testDownsamplerOptionsExpect{ 542 writes: []testExpectedWrite{ 543 { 544 tags: gaugeMetric.tags, 545 values: []expectedValue{{value: 30}}, 546 attributes: &storagemetadata.Attributes{ 547 MetricsType: storagemetadata.AggregatedMetricsType, 548 Resolution: 1 * time.Second, 549 Retention: 30 * 24 * time.Hour, 550 }, 551 }, 552 }, 553 }, 554 }) 555 556 // Test expected output 557 testDownsamplerAggregation(t, testDownsampler) 558 } 559 560 func TestDownsamplerAggregationWithAutoMappingRulesAndRulesConfigMappingRulesAndDropRule(t *testing.T) { 561 t.Parallel() 562 563 gaugeMetric := testGaugeMetric{ 564 tags: map[string]string{ 565 nameTag: "foo_metric", 566 "app": "nginx_edge", 567 "env": "staging", 568 }, 569 timedSamples: []testGaugeMetricTimedSample{ 570 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 571 }, 572 expectDropPolicyApplied: true, 573 } 574 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 575 autoMappingRules: []m3.ClusterNamespaceOptions{ 576 m3.NewClusterNamespaceOptions( 577 storagemetadata.Attributes{ 578 MetricsType: storagemetadata.AggregatedMetricsType, 579 Retention: 2 * time.Hour, 580 Resolution: 1 * time.Second, 581 }, 582 nil, 583 ), 584 m3.NewClusterNamespaceOptions( 585 storagemetadata.Attributes{ 586 MetricsType: storagemetadata.AggregatedMetricsType, 587 Retention: 12 * time.Hour, 588 Resolution: 5 * time.Second, 589 }, 590 nil, 591 ), 592 }, 593 rulesConfig: &RulesConfiguration{ 594 MappingRules: []MappingRuleConfiguration{ 595 { 596 Filter: "env:staging", 597 Drop: true, 598 }, 599 { 600 Filter: "app:nginx*", 601 Aggregations: []aggregation.Type{aggregation.Max}, 602 StoragePolicies: []StoragePolicyConfiguration{ 603 { 604 Resolution: 10 * time.Second, 605 Retention: 30 * 24 * time.Hour, 606 }, 607 }, 608 }, 609 }, 610 }, 611 ingest: &testDownsamplerOptionsIngest{ 612 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 613 }, 614 expect: &testDownsamplerOptionsExpect{ 615 allowFilter: &testDownsamplerOptionsExpectAllowFilter{ 616 attributes: []storagemetadata.Attributes{ 617 { 618 MetricsType: storagemetadata.AggregatedMetricsType, 619 Resolution: 10 * time.Second, 620 Retention: 30 * 24 * time.Hour, 621 }, 622 }, 623 }, 624 writes: []testExpectedWrite{ 625 { 626 tags: gaugeMetric.tags, 627 values: []expectedValue{{value: 30}}, 628 attributes: &storagemetadata.Attributes{ 629 MetricsType: storagemetadata.AggregatedMetricsType, 630 Resolution: 10 * time.Second, 631 Retention: 30 * 24 * time.Hour, 632 }, 633 }, 634 }, 635 }, 636 }) 637 638 // Test expected output 639 testDownsamplerAggregation(t, testDownsampler) 640 } 641 642 func TestDownsamplerAggregationWithRulesConfigMappingRulesPartialReplaceAutoMappingRuleFromNamespacesWatcher(t *testing.T) { 643 t.Parallel() 644 645 ctrl := xtest.NewController(t) 646 defer ctrl.Finish() 647 648 gaugeMetric := testGaugeMetric{ 649 tags: map[string]string{ 650 nameTag: "foo_metric", 651 "app": "nginx_edge", 652 }, 653 timedSamples: []testGaugeMetricTimedSample{ 654 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0, offset: 1 * time.Millisecond}, 655 }, 656 } 657 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 658 rulesConfig: &RulesConfiguration{ 659 MappingRules: []MappingRuleConfiguration{ 660 { 661 Filter: "app:nginx*", 662 Aggregations: []aggregation.Type{aggregation.Max}, 663 StoragePolicies: []StoragePolicyConfiguration{ 664 { 665 Resolution: 2 * time.Second, 666 Retention: 24 * time.Hour, 667 }, 668 }, 669 }, 670 }, 671 }, 672 ingest: &testDownsamplerOptionsIngest{ 673 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 674 }, 675 expect: &testDownsamplerOptionsExpect{ 676 writes: []testExpectedWrite{ 677 // Expect the max to be used and override the default auto 678 // mapping rule for the storage policy 2s:24h. 679 { 680 tags: gaugeMetric.tags, 681 values: []expectedValue{{value: 30}}, 682 attributes: &storagemetadata.Attributes{ 683 MetricsType: storagemetadata.AggregatedMetricsType, 684 Resolution: 2 * time.Second, 685 Retention: 24 * time.Hour, 686 }, 687 }, 688 // Expect last to still be used for the storage 689 // policy 4s:48h. 690 { 691 tags: gaugeMetric.tags, 692 // NB(nate): Automapping rules generated from cluster namespaces currently 693 // hardcode 'Last' as the aggregation type. As such, expect value to be the last value 694 // in the sample. 695 values: []expectedValue{{value: 0}}, 696 attributes: &storagemetadata.Attributes{ 697 MetricsType: storagemetadata.AggregatedMetricsType, 698 Resolution: 4 * time.Second, 699 Retention: 48 * time.Hour, 700 }, 701 }, 702 }, 703 }, 704 }) 705 706 origStagedMetadata := originalStagedMetadata(t, testDownsampler) 707 708 session := dbclient.NewMockSession(ctrl) 709 setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{ 710 NamespaceID: ident.StringID("2s:24h"), 711 Resolution: 2 * time.Second, 712 Retention: 24 * time.Hour, 713 Session: session, 714 }, m3.AggregatedClusterNamespaceDefinition{ 715 NamespaceID: ident.StringID("4s:48h"), 716 Resolution: 4 * time.Second, 717 Retention: 48 * time.Hour, 718 Session: session, 719 }) 720 721 waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata) 722 723 // Test expected output 724 testDownsamplerAggregation(t, testDownsampler) 725 } 726 727 func TestDownsamplerAggregationWithRulesConfigMappingRulesReplaceAutoMappingRuleFromNamespacesWatcher(t *testing.T) { 728 t.Parallel() 729 730 ctrl := xtest.NewController(t) 731 defer ctrl.Finish() 732 733 gaugeMetric := testGaugeMetric{ 734 tags: map[string]string{ 735 nameTag: "foo_metric", 736 "app": "nginx_edge", 737 }, 738 timedSamples: []testGaugeMetricTimedSample{ 739 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 740 }, 741 } 742 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 743 rulesConfig: &RulesConfiguration{ 744 MappingRules: []MappingRuleConfiguration{ 745 { 746 Filter: "app:nginx*", 747 Aggregations: []aggregation.Type{aggregation.Max}, 748 StoragePolicies: []StoragePolicyConfiguration{ 749 { 750 Resolution: 2 * time.Second, 751 Retention: 24 * time.Hour, 752 }, 753 }, 754 }, 755 }, 756 }, 757 ingest: &testDownsamplerOptionsIngest{ 758 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 759 }, 760 expect: &testDownsamplerOptionsExpect{ 761 writes: []testExpectedWrite{ 762 // Expect the max to be used and override the default auto 763 // mapping rule for the storage policy 2s:24h. 764 { 765 tags: gaugeMetric.tags, 766 values: []expectedValue{{value: 30}}, 767 attributes: &storagemetadata.Attributes{ 768 MetricsType: storagemetadata.AggregatedMetricsType, 769 Resolution: 2 * time.Second, 770 Retention: 24 * time.Hour, 771 }, 772 }, 773 }, 774 }, 775 }) 776 777 origStagedMetadata := originalStagedMetadata(t, testDownsampler) 778 779 session := dbclient.NewMockSession(ctrl) 780 setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{ 781 NamespaceID: ident.StringID("2s:24h"), 782 Resolution: 2 * time.Second, 783 Retention: 24 * time.Hour, 784 Session: session, 785 }) 786 787 waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata) 788 789 // Test expected output 790 testDownsamplerAggregation(t, testDownsampler) 791 } 792 793 func TestDownsamplerAggregationWithRulesConfigMappingRulesNoNameTag(t *testing.T) { 794 t.Parallel() 795 796 gaugeMetric := testGaugeMetric{ 797 tags: map[string]string{ 798 "app": "nginx_edge", 799 "endpoint": "health", 800 }, 801 timedSamples: []testGaugeMetricTimedSample{ 802 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 803 }, 804 } 805 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 806 identTag: "endpoint", 807 rulesConfig: &RulesConfiguration{ 808 MappingRules: []MappingRuleConfiguration{ 809 { 810 Filter: "app:nginx*", 811 Aggregations: []aggregation.Type{aggregation.Max}, 812 StoragePolicies: []StoragePolicyConfiguration{ 813 { 814 Resolution: 1 * time.Second, 815 Retention: 30 * 24 * time.Hour, 816 }, 817 }, 818 }, 819 }, 820 }, 821 ingest: &testDownsamplerOptionsIngest{ 822 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 823 }, 824 expect: &testDownsamplerOptionsExpect{ 825 writes: []testExpectedWrite{ 826 { 827 tags: gaugeMetric.tags, 828 values: []expectedValue{{value: 30}}, 829 attributes: &storagemetadata.Attributes{ 830 MetricsType: storagemetadata.AggregatedMetricsType, 831 Resolution: 1 * time.Second, 832 Retention: 30 * 24 * time.Hour, 833 }, 834 }, 835 }, 836 }, 837 }) 838 839 // Test expected output 840 testDownsamplerAggregation(t, testDownsampler) 841 } 842 843 func TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilter(t *testing.T) { 844 t.Parallel() 845 846 gaugeMetric := testGaugeMetric{ 847 tags: map[string]string{ 848 "app": "nginx_edge", 849 "endpoint": "health", 850 }, 851 timedSamples: []testGaugeMetricTimedSample{ 852 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 853 }, 854 } 855 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 856 identTag: "endpoint", 857 rulesConfig: &RulesConfiguration{ 858 MappingRules: []MappingRuleConfiguration{ 859 { 860 Filter: "__m3_type__:counter", 861 Aggregations: []aggregation.Type{aggregation.Max}, 862 StoragePolicies: []StoragePolicyConfiguration{ 863 { 864 Resolution: 1 * time.Second, 865 Retention: 30 * 24 * time.Hour, 866 }, 867 }, 868 }, 869 }, 870 }, 871 sampleAppenderOpts: &SampleAppenderOptions{ 872 SeriesAttributes: ts.SeriesAttributes{M3Type: ts.M3MetricTypeCounter}, 873 }, 874 ingest: &testDownsamplerOptionsIngest{ 875 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 876 }, 877 expect: &testDownsamplerOptionsExpect{ 878 writes: []testExpectedWrite{ 879 { 880 tags: map[string]string{ 881 "app": "nginx_edge", 882 "endpoint": "health", 883 }, 884 values: []expectedValue{{value: 30}}, 885 attributes: &storagemetadata.Attributes{ 886 MetricsType: storagemetadata.AggregatedMetricsType, 887 Resolution: 1 * time.Second, 888 Retention: 30 * 24 * time.Hour, 889 }, 890 }, 891 }, 892 }, 893 }) 894 895 // Test expected output 896 testDownsamplerAggregation(t, testDownsampler) 897 } 898 899 //nolint:dupl 900 func TestDownsamplerAggregationWithRulesConfigMappingRulesTypePromFilter(t *testing.T) { 901 t.Parallel() 902 903 gaugeMetric := testGaugeMetric{ 904 tags: map[string]string{ 905 "app": "nginx_edge", 906 "endpoint": "health", 907 }, 908 timedSamples: []testGaugeMetricTimedSample{ 909 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 910 }, 911 } 912 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 913 identTag: "endpoint", 914 rulesConfig: &RulesConfiguration{ 915 MappingRules: []MappingRuleConfiguration{ 916 { 917 Filter: "__m3_prom_type__:counter", 918 Aggregations: []aggregation.Type{aggregation.Max}, 919 StoragePolicies: []StoragePolicyConfiguration{ 920 { 921 Resolution: 1 * time.Second, 922 Retention: 30 * 24 * time.Hour, 923 }, 924 }, 925 }, 926 }, 927 }, 928 sampleAppenderOpts: &SampleAppenderOptions{ 929 SeriesAttributes: ts.SeriesAttributes{PromType: ts.PromMetricTypeCounter}, 930 }, 931 ingest: &testDownsamplerOptionsIngest{ 932 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 933 }, 934 expect: &testDownsamplerOptionsExpect{ 935 writes: []testExpectedWrite{ 936 { 937 tags: map[string]string{ 938 "app": "nginx_edge", 939 "endpoint": "health", 940 }, 941 values: []expectedValue{{value: 30}}, 942 attributes: &storagemetadata.Attributes{ 943 MetricsType: storagemetadata.AggregatedMetricsType, 944 Resolution: 1 * time.Second, 945 Retention: 30 * 24 * time.Hour, 946 }, 947 }, 948 }, 949 }, 950 }) 951 952 // Test expected output 953 testDownsamplerAggregation(t, testDownsampler) 954 } 955 956 func TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilterNoMatch(t *testing.T) { 957 t.Parallel() 958 959 gaugeMetric := testGaugeMetric{ 960 tags: map[string]string{ 961 "app": "nginx_edge", 962 "endpoint": "health", 963 }, 964 timedSamples: []testGaugeMetricTimedSample{ 965 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 966 }, 967 } 968 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 969 identTag: "endpoint", 970 rulesConfig: &RulesConfiguration{ 971 MappingRules: []MappingRuleConfiguration{ 972 { 973 Filter: "__m3_type__:counter", 974 Aggregations: []aggregation.Type{aggregation.Max}, 975 StoragePolicies: []StoragePolicyConfiguration{ 976 { 977 Resolution: 1 * time.Second, 978 Retention: 30 * 24 * time.Hour, 979 }, 980 }, 981 }, 982 }, 983 }, 984 sampleAppenderOpts: &SampleAppenderOptions{ 985 SeriesAttributes: ts.SeriesAttributes{M3Type: ts.M3MetricTypeGauge}, 986 }, 987 ingest: &testDownsamplerOptionsIngest{ 988 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 989 }, 990 expect: &testDownsamplerOptionsExpect{ 991 writes: []testExpectedWrite{}, 992 }, 993 }) 994 995 // Test expected output 996 testDownsamplerAggregation(t, testDownsampler) 997 } 998 999 //nolint:dupl 1000 func TestDownsamplerAggregationWithRulesConfigMappingRulesPromTypeFilterNoMatch(t *testing.T) { 1001 t.Parallel() 1002 1003 gaugeMetric := testGaugeMetric{ 1004 tags: map[string]string{ 1005 "app": "nginx_edge", 1006 "endpoint": "health", 1007 }, 1008 timedSamples: []testGaugeMetricTimedSample{ 1009 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1010 }, 1011 } 1012 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1013 identTag: "endpoint", 1014 rulesConfig: &RulesConfiguration{ 1015 MappingRules: []MappingRuleConfiguration{ 1016 { 1017 Filter: "__m3_prom_type__:counter", 1018 Aggregations: []aggregation.Type{aggregation.Max}, 1019 StoragePolicies: []StoragePolicyConfiguration{ 1020 { 1021 Resolution: 1 * time.Second, 1022 Retention: 30 * 24 * time.Hour, 1023 }, 1024 }, 1025 }, 1026 }, 1027 }, 1028 sampleAppenderOpts: &SampleAppenderOptions{ 1029 SeriesAttributes: ts.SeriesAttributes{PromType: ts.PromMetricTypeGauge}, 1030 }, 1031 ingest: &testDownsamplerOptionsIngest{ 1032 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1033 }, 1034 expect: &testDownsamplerOptionsExpect{ 1035 writes: []testExpectedWrite{}, 1036 }, 1037 }) 1038 1039 // Test expected output 1040 testDownsamplerAggregation(t, testDownsampler) 1041 } 1042 1043 func TestDownsamplerAggregationWithRulesConfigMappingRulesAggregationType(t *testing.T) { 1044 t.Parallel() 1045 1046 gaugeMetric := testGaugeMetric{ 1047 tags: map[string]string{ 1048 "__g0__": "nginx_edge", 1049 "__g1__": "health", 1050 "__option_id_scheme__": "graphite", 1051 }, 1052 timedSamples: []testGaugeMetricTimedSample{ 1053 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1054 }, 1055 } 1056 tags := []Tag{{Name: "__m3_graphite_aggregation__"}} 1057 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1058 identTag: "__g2__", 1059 rulesConfig: &RulesConfiguration{ 1060 MappingRules: []MappingRuleConfiguration{ 1061 { 1062 Filter: "__m3_type__:gauge", 1063 Aggregations: []aggregation.Type{aggregation.Max}, 1064 StoragePolicies: []StoragePolicyConfiguration{ 1065 { 1066 Resolution: 1 * time.Second, 1067 Retention: 30 * 24 * time.Hour, 1068 }, 1069 }, 1070 Tags: tags, 1071 }, 1072 }, 1073 }, 1074 ingest: &testDownsamplerOptionsIngest{ 1075 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1076 }, 1077 expect: &testDownsamplerOptionsExpect{ 1078 writes: []testExpectedWrite{ 1079 { 1080 tags: map[string]string{ 1081 "__g0__": "nginx_edge", 1082 "__g1__": "health", 1083 "__g2__": "upper", 1084 }, 1085 values: []expectedValue{{value: 30}}, 1086 attributes: &storagemetadata.Attributes{ 1087 MetricsType: storagemetadata.AggregatedMetricsType, 1088 Resolution: 1 * time.Second, 1089 Retention: 30 * 24 * time.Hour, 1090 }, 1091 }, 1092 }, 1093 }, 1094 }) 1095 1096 // Test expected output 1097 testDownsamplerAggregation(t, testDownsampler) 1098 } 1099 1100 func TestDownsamplerAggregationWithRulesConfigMappingRulesMultipleAggregationType(t *testing.T) { 1101 t.Parallel() 1102 1103 gaugeMetric := testGaugeMetric{ 1104 tags: map[string]string{ 1105 "__g0__": "nginx_edge", 1106 "__g1__": "health", 1107 }, 1108 timedSamples: []testGaugeMetricTimedSample{ 1109 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1110 }, 1111 } 1112 tags := []Tag{{Name: "__m3_graphite_aggregation__"}} 1113 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1114 identTag: "__g2__", 1115 rulesConfig: &RulesConfiguration{ 1116 MappingRules: []MappingRuleConfiguration{ 1117 { 1118 Filter: "__m3_type__:gauge", 1119 Aggregations: []aggregation.Type{aggregation.Max}, 1120 StoragePolicies: []StoragePolicyConfiguration{ 1121 { 1122 Resolution: 1 * time.Second, 1123 Retention: 30 * 24 * time.Hour, 1124 }, 1125 }, 1126 Tags: tags, 1127 }, 1128 { 1129 Filter: "__m3_type__:gauge", 1130 Aggregations: []aggregation.Type{aggregation.Sum}, 1131 StoragePolicies: []StoragePolicyConfiguration{ 1132 { 1133 Resolution: 1 * time.Second, 1134 Retention: 30 * 24 * time.Hour, 1135 }, 1136 }, 1137 Tags: tags, 1138 }, 1139 }, 1140 }, 1141 ingest: &testDownsamplerOptionsIngest{ 1142 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1143 }, 1144 expect: &testDownsamplerOptionsExpect{ 1145 writes: []testExpectedWrite{ 1146 { 1147 tags: map[string]string{ 1148 "__g0__": "nginx_edge", 1149 "__g1__": "health", 1150 "__g2__": "upper", 1151 }, 1152 values: []expectedValue{{value: 30}}, 1153 attributes: &storagemetadata.Attributes{ 1154 MetricsType: storagemetadata.AggregatedMetricsType, 1155 Resolution: 1 * time.Second, 1156 Retention: 30 * 24 * time.Hour, 1157 }, 1158 }, 1159 { 1160 tags: map[string]string{ 1161 "__g0__": "nginx_edge", 1162 "__g1__": "health", 1163 "__g2__": "sum", 1164 }, 1165 values: []expectedValue{{value: 60}}, 1166 attributes: &storagemetadata.Attributes{ 1167 MetricsType: storagemetadata.AggregatedMetricsType, 1168 Resolution: 1 * time.Second, 1169 Retention: 30 * 24 * time.Hour, 1170 }, 1171 }, 1172 }, 1173 }, 1174 }) 1175 1176 // Test expected output 1177 testDownsamplerAggregation(t, testDownsampler) 1178 } 1179 1180 func TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixAndAggregationTags(t *testing.T) { 1181 t.Parallel() 1182 1183 gaugeMetric := testGaugeMetric{ 1184 tags: map[string]string{ 1185 "__g0__": "nginx_edge", 1186 "__g1__": "health", 1187 }, 1188 timedSamples: []testGaugeMetricTimedSample{ 1189 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1190 }, 1191 } 1192 tags := []Tag{ 1193 {Name: "__m3_graphite_aggregation__"}, 1194 {Name: "__m3_graphite_prefix__", Value: "stats.counter"}, 1195 } 1196 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1197 identTag: "__g4__", 1198 rulesConfig: &RulesConfiguration{ 1199 MappingRules: []MappingRuleConfiguration{ 1200 { 1201 Filter: "__m3_type__:gauge", 1202 Aggregations: []aggregation.Type{aggregation.Max}, 1203 StoragePolicies: []StoragePolicyConfiguration{ 1204 { 1205 Resolution: 1 * time.Second, 1206 Retention: 30 * 24 * time.Hour, 1207 }, 1208 }, 1209 Tags: tags, 1210 }, 1211 }, 1212 }, 1213 ingest: &testDownsamplerOptionsIngest{ 1214 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1215 }, 1216 expect: &testDownsamplerOptionsExpect{ 1217 writes: []testExpectedWrite{ 1218 { 1219 tags: map[string]string{ 1220 "__g0__": "stats", 1221 "__g1__": "counter", 1222 "__g2__": "nginx_edge", 1223 "__g3__": "health", 1224 "__g4__": "upper", 1225 }, 1226 values: []expectedValue{{value: 30}}, 1227 attributes: &storagemetadata.Attributes{ 1228 MetricsType: storagemetadata.AggregatedMetricsType, 1229 Resolution: 1 * time.Second, 1230 Retention: 30 * 24 * time.Hour, 1231 }, 1232 }, 1233 }, 1234 }, 1235 }) 1236 1237 // Test expected output 1238 testDownsamplerAggregation(t, testDownsampler) 1239 } 1240 1241 func TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixTag(t *testing.T) { 1242 t.Parallel() 1243 1244 gaugeMetric := testGaugeMetric{ 1245 tags: map[string]string{ 1246 "__g0__": "nginx_edge", 1247 "__g1__": "health", 1248 }, 1249 timedSamples: []testGaugeMetricTimedSample{ 1250 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1251 }, 1252 } 1253 tags := []Tag{ 1254 {Name: "__m3_graphite_prefix__", Value: "stats.counter"}, 1255 } 1256 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1257 identTag: "__g3__", 1258 rulesConfig: &RulesConfiguration{ 1259 MappingRules: []MappingRuleConfiguration{ 1260 { 1261 Filter: "__m3_type__:gauge", 1262 Aggregations: []aggregation.Type{aggregation.Max}, 1263 StoragePolicies: []StoragePolicyConfiguration{ 1264 { 1265 Resolution: 1 * time.Second, 1266 Retention: 30 * 24 * time.Hour, 1267 }, 1268 }, 1269 Tags: tags, 1270 }, 1271 }, 1272 }, 1273 ingest: &testDownsamplerOptionsIngest{ 1274 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1275 }, 1276 expect: &testDownsamplerOptionsExpect{ 1277 writes: []testExpectedWrite{ 1278 { 1279 tags: map[string]string{ 1280 "__g0__": "stats", 1281 "__g1__": "counter", 1282 "__g2__": "nginx_edge", 1283 "__g3__": "health", 1284 }, 1285 values: []expectedValue{{value: 30}}, 1286 attributes: &storagemetadata.Attributes{ 1287 MetricsType: storagemetadata.AggregatedMetricsType, 1288 Resolution: 1 * time.Second, 1289 Retention: 30 * 24 * time.Hour, 1290 }, 1291 }, 1292 }, 1293 }, 1294 }) 1295 1296 // Test expected output 1297 testDownsamplerAggregation(t, testDownsampler) 1298 } 1299 1300 func TestDownsamplerAggregationWithRulesConfigMappingRulesPromQuantileTag(t *testing.T) { 1301 t.Parallel() 1302 1303 timerMetric := testTimerMetric{ 1304 tags: map[string]string{ 1305 nameTag: "http_requests", 1306 "app": "nginx_edge", 1307 "endpoint": "health", 1308 }, 1309 timedSamples: []testTimerMetricTimedSample{ 1310 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1311 }, 1312 } 1313 tags := []Tag{ 1314 {Name: "__m3_prom_summary__"}, 1315 } 1316 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1317 rulesConfig: &RulesConfiguration{ 1318 MappingRules: []MappingRuleConfiguration{ 1319 { 1320 Filter: "__m3_type__:timer", 1321 Aggregations: []aggregation.Type{aggregation.P50}, 1322 StoragePolicies: []StoragePolicyConfiguration{ 1323 { 1324 Resolution: 1 * time.Second, 1325 Retention: 30 * 24 * time.Hour, 1326 }, 1327 }, 1328 Tags: tags, 1329 }, 1330 }, 1331 }, 1332 sampleAppenderOpts: &SampleAppenderOptions{ 1333 SeriesAttributes: ts.SeriesAttributes{M3Type: ts.M3MetricTypeTimer}, 1334 }, 1335 ingest: &testDownsamplerOptionsIngest{ 1336 timerMetrics: []testTimerMetric{timerMetric}, 1337 }, 1338 expect: &testDownsamplerOptionsExpect{ 1339 writes: []testExpectedWrite{ 1340 { 1341 tags: map[string]string{ 1342 nameTag: "http_requests", 1343 "app": "nginx_edge", 1344 "endpoint": "health", 1345 "agg": ".p50", 1346 "quantile": "0.5", 1347 }, 1348 values: []expectedValue{{value: 10}}, 1349 attributes: &storagemetadata.Attributes{ 1350 MetricsType: storagemetadata.AggregatedMetricsType, 1351 Resolution: 1 * time.Second, 1352 Retention: 30 * 24 * time.Hour, 1353 }, 1354 }, 1355 }, 1356 }, 1357 }) 1358 1359 // Test expected output 1360 testDownsamplerAggregation(t, testDownsampler) 1361 } 1362 1363 func TestDownsamplerAggregationWithRulesConfigMappingRulesPromQuantileTagIgnored(t *testing.T) { 1364 t.Parallel() 1365 1366 timerMetric := testTimerMetric{ 1367 tags: map[string]string{ 1368 nameTag: "http_requests", 1369 "app": "nginx_edge", 1370 "endpoint": "health", 1371 }, 1372 timedSamples: []testTimerMetricTimedSample{ 1373 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1374 }, 1375 } 1376 tags := []Tag{ 1377 {Name: "__m3_prom_summary__"}, 1378 } 1379 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1380 rulesConfig: &RulesConfiguration{ 1381 MappingRules: []MappingRuleConfiguration{ 1382 { 1383 Filter: "__m3_type__:timer", 1384 Aggregations: []aggregation.Type{aggregation.Max}, 1385 StoragePolicies: []StoragePolicyConfiguration{ 1386 { 1387 Resolution: 1 * time.Second, 1388 Retention: 30 * 24 * time.Hour, 1389 }, 1390 }, 1391 Tags: tags, 1392 }, 1393 }, 1394 }, 1395 sampleAppenderOpts: &SampleAppenderOptions{ 1396 SeriesAttributes: ts.SeriesAttributes{M3Type: ts.M3MetricTypeTimer}, 1397 }, 1398 ingest: &testDownsamplerOptionsIngest{ 1399 timerMetrics: []testTimerMetric{timerMetric}, 1400 }, 1401 expect: &testDownsamplerOptionsExpect{ 1402 writes: []testExpectedWrite{ 1403 { 1404 tags: map[string]string{ 1405 nameTag: "http_requests", 1406 "app": "nginx_edge", 1407 "endpoint": "health", 1408 "agg": ".upper", 1409 }, 1410 values: []expectedValue{{value: 30}}, 1411 attributes: &storagemetadata.Attributes{ 1412 MetricsType: storagemetadata.AggregatedMetricsType, 1413 Resolution: 1 * time.Second, 1414 Retention: 30 * 24 * time.Hour, 1415 }, 1416 }, 1417 }, 1418 }, 1419 }) 1420 1421 // Test expected output 1422 testDownsamplerAggregation(t, testDownsampler) 1423 } 1424 1425 func TestDownsamplerAggregationWithRulesConfigMappingRulesAugmentTag(t *testing.T) { 1426 t.Parallel() 1427 1428 gaugeMetric := testGaugeMetric{ 1429 tags: map[string]string{ 1430 "app": "nginx_edge", 1431 "endpoint": "health", 1432 }, 1433 timedSamples: []testGaugeMetricTimedSample{ 1434 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1435 }, 1436 } 1437 tags := []Tag{ 1438 {Name: "datacenter", Value: "abc"}, 1439 } 1440 //nolint:dupl 1441 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1442 identTag: "app", 1443 rulesConfig: &RulesConfiguration{ 1444 MappingRules: []MappingRuleConfiguration{ 1445 { 1446 Filter: "app:nginx*", 1447 Aggregations: []aggregation.Type{aggregation.Max}, 1448 StoragePolicies: []StoragePolicyConfiguration{ 1449 { 1450 Resolution: 1 * time.Second, 1451 Retention: 30 * 24 * time.Hour, 1452 }, 1453 }, 1454 Tags: tags, 1455 }, 1456 }, 1457 }, 1458 ingest: &testDownsamplerOptionsIngest{ 1459 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1460 }, 1461 expect: &testDownsamplerOptionsExpect{ 1462 writes: []testExpectedWrite{ 1463 { 1464 tags: map[string]string{ 1465 "app": "nginx_edge", 1466 "endpoint": "health", 1467 "datacenter": "abc", 1468 }, 1469 values: []expectedValue{{value: 30}}, 1470 attributes: &storagemetadata.Attributes{ 1471 MetricsType: storagemetadata.AggregatedMetricsType, 1472 Resolution: 1 * time.Second, 1473 Retention: 30 * 24 * time.Hour, 1474 }, 1475 }, 1476 }, 1477 }, 1478 }) 1479 1480 // Test expected output 1481 testDownsamplerAggregation(t, testDownsampler) 1482 } 1483 1484 func TestDownsamplerAggregationWithRulesConfigMappingRulesWithDropTSTag(t *testing.T) { 1485 t.Parallel() 1486 1487 gaugeMetric := testGaugeMetric{ 1488 tags: map[string]string{ 1489 nameTag: "foo_metric", 1490 "app": "nginx_edge", 1491 }, 1492 timedSamples: []testGaugeMetricTimedSample{ 1493 {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0}, 1494 }, 1495 } 1496 counterMetric := testCounterMetric{ 1497 tags: map[string]string{ 1498 nameTag: "counter0", 1499 "app": "testapp", 1500 "foo": "bar", 1501 }, 1502 timedSamples: []testCounterMetricTimedSample{ 1503 {value: 1}, {value: 2}, {value: 3}, 1504 }, 1505 expectDropTimestamp: true, 1506 } 1507 tags := []Tag{ 1508 {Name: "__m3_drop_timestamp__", Value: ""}, 1509 } 1510 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1511 rulesConfig: &RulesConfiguration{ 1512 MappingRules: []MappingRuleConfiguration{ 1513 { 1514 Filter: "app:nginx*", 1515 Aggregations: []aggregation.Type{aggregation.Max}, 1516 StoragePolicies: []StoragePolicyConfiguration{ 1517 { 1518 Resolution: 1 * time.Second, 1519 Retention: 30 * 24 * time.Hour, 1520 }, 1521 }, 1522 }, 1523 { 1524 Filter: "app:testapp", 1525 Aggregations: []aggregation.Type{aggregation.Sum}, 1526 StoragePolicies: []StoragePolicyConfiguration{ 1527 { 1528 Resolution: 1 * time.Second, 1529 Retention: 30 * 24 * time.Hour, 1530 }, 1531 }, 1532 Tags: tags, 1533 }, 1534 }, 1535 }, 1536 ingest: &testDownsamplerOptionsIngest{ 1537 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1538 counterMetrics: []testCounterMetric{counterMetric}, 1539 }, 1540 expect: &testDownsamplerOptionsExpect{ 1541 writes: []testExpectedWrite{ 1542 { 1543 tags: gaugeMetric.tags, 1544 values: []expectedValue{{value: 30}}, 1545 attributes: &storagemetadata.Attributes{ 1546 MetricsType: storagemetadata.AggregatedMetricsType, 1547 Resolution: 1 * time.Second, 1548 Retention: 30 * 24 * time.Hour, 1549 }, 1550 }, 1551 { 1552 tags: counterMetric.tags, 1553 values: []expectedValue{{value: 6}}, 1554 attributes: &storagemetadata.Attributes{ 1555 MetricsType: storagemetadata.AggregatedMetricsType, 1556 Resolution: 1 * time.Second, 1557 Retention: 30 * 24 * time.Hour, 1558 }, 1559 }, 1560 }, 1561 }, 1562 }) 1563 1564 // Test expected output 1565 testDownsamplerAggregation(t, testDownsampler) 1566 } 1567 1568 func TestDownsamplerAggregationWithRulesConfigRollupRulesNoNameTag(t *testing.T) { 1569 t.Parallel() 1570 1571 gaugeMetric := testGaugeMetric{ 1572 tags: map[string]string{ 1573 "app": "nginx_edge", 1574 "status_code": "500", 1575 "endpoint": "/foo/bar", 1576 "not_rolled_up": "not_rolled_up_value", 1577 }, 1578 timedSamples: []testGaugeMetricTimedSample{ 1579 {value: 42}, 1580 {value: 64, offset: 1 * time.Second}, 1581 }, 1582 } 1583 res := 1 * time.Second 1584 ret := 30 * 24 * time.Hour 1585 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1586 identTag: "endpoint", 1587 rulesConfig: &RulesConfiguration{ 1588 RollupRules: []RollupRuleConfiguration{ 1589 { 1590 Filter: fmt.Sprintf( 1591 "%s:http_requests app:* status_code:* endpoint:*", 1592 nameTag), 1593 Transforms: []TransformConfiguration{ 1594 { 1595 Transform: &TransformOperationConfiguration{ 1596 Type: transformation.PerSecond, 1597 }, 1598 }, 1599 { 1600 Rollup: &RollupOperationConfiguration{ 1601 MetricName: "http_requests_by_status_code", 1602 GroupBy: []string{"app", "status_code", "endpoint"}, 1603 Aggregations: []aggregation.Type{aggregation.Sum}, 1604 }, 1605 }, 1606 }, 1607 StoragePolicies: []StoragePolicyConfiguration{ 1608 { 1609 Resolution: res, 1610 Retention: ret, 1611 }, 1612 }, 1613 }, 1614 }, 1615 }, 1616 ingest: &testDownsamplerOptionsIngest{ 1617 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1618 }, 1619 expect: &testDownsamplerOptionsExpect{ 1620 writes: []testExpectedWrite{}, 1621 }, 1622 }) 1623 1624 // Test expected output 1625 testDownsamplerAggregation(t, testDownsampler) 1626 } 1627 1628 func TestDownsamplerAggregationWithRulesConfigRollupRulesPerSecondSum(t *testing.T) { 1629 t.Parallel() 1630 1631 gaugeMetric := testGaugeMetric{ 1632 tags: map[string]string{ 1633 nameTag: "http_requests", 1634 "app": "nginx_edge", 1635 "status_code": "500", 1636 "endpoint": "/foo/bar", 1637 "not_rolled_up": "not_rolled_up_value", 1638 }, 1639 timedSamples: []testGaugeMetricTimedSample{ 1640 {value: 42}, 1641 {value: 64, offset: 1 * time.Second}, 1642 }, 1643 } 1644 res := 1 * time.Second 1645 ret := 30 * 24 * time.Hour 1646 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1647 rulesConfig: &RulesConfiguration{ 1648 RollupRules: []RollupRuleConfiguration{ 1649 { 1650 Filter: fmt.Sprintf( 1651 "%s:http_requests app:* status_code:* endpoint:*", 1652 nameTag), 1653 Transforms: []TransformConfiguration{ 1654 { 1655 Transform: &TransformOperationConfiguration{ 1656 Type: transformation.PerSecond, 1657 }, 1658 }, 1659 { 1660 Rollup: &RollupOperationConfiguration{ 1661 MetricName: "http_requests_by_status_code", 1662 GroupBy: []string{"app", "status_code", "endpoint"}, 1663 Aggregations: []aggregation.Type{aggregation.Sum}, 1664 }, 1665 }, 1666 }, 1667 StoragePolicies: []StoragePolicyConfiguration{ 1668 { 1669 Resolution: res, 1670 Retention: ret, 1671 }, 1672 }, 1673 }, 1674 }, 1675 }, 1676 ingest: &testDownsamplerOptionsIngest{ 1677 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1678 }, 1679 expect: &testDownsamplerOptionsExpect{ 1680 writes: []testExpectedWrite{ 1681 { 1682 tags: map[string]string{ 1683 nameTag: "http_requests_by_status_code", 1684 string(rollupTagName): string(rollupTagValue), 1685 "app": "nginx_edge", 1686 "status_code": "500", 1687 "endpoint": "/foo/bar", 1688 }, 1689 values: []expectedValue{{value: 22}}, 1690 attributes: &storagemetadata.Attributes{ 1691 MetricsType: storagemetadata.AggregatedMetricsType, 1692 Resolution: res, 1693 Retention: ret, 1694 }, 1695 }, 1696 }, 1697 }, 1698 }) 1699 1700 // Test expected output 1701 testDownsamplerAggregation(t, testDownsampler) 1702 } 1703 1704 func TestDownsamplerAggregationWithRulesConfigRollupRulesAugmentTags(t *testing.T) { 1705 t.Parallel() 1706 1707 gaugeMetric := testGaugeMetric{ 1708 tags: map[string]string{ 1709 nameTag: "http_requests", 1710 "app": "nginx_edge", 1711 "status_code": "500", 1712 "endpoint": "/foo/bar", 1713 "not_rolled_up": "not_rolled_up_value", 1714 }, 1715 timedSamples: []testGaugeMetricTimedSample{ 1716 {value: 42}, 1717 {value: 64, offset: 1 * time.Second}, 1718 }, 1719 } 1720 res := 1 * time.Second 1721 ret := 30 * 24 * time.Hour 1722 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1723 rulesConfig: &RulesConfiguration{ 1724 RollupRules: []RollupRuleConfiguration{ 1725 { 1726 Filter: fmt.Sprintf( 1727 "%s:http_requests app:* status_code:* endpoint:*", 1728 nameTag), 1729 Transforms: []TransformConfiguration{ 1730 { 1731 Transform: &TransformOperationConfiguration{ 1732 Type: transformation.PerSecond, 1733 }, 1734 }, 1735 { 1736 Rollup: &RollupOperationConfiguration{ 1737 MetricName: "http_requests_by_status_code", 1738 GroupBy: []string{"app", "status_code", "endpoint"}, 1739 Aggregations: []aggregation.Type{aggregation.Sum}, 1740 }, 1741 }, 1742 }, 1743 StoragePolicies: []StoragePolicyConfiguration{ 1744 { 1745 Resolution: res, 1746 Retention: ret, 1747 }, 1748 }, 1749 Tags: []Tag{ 1750 { 1751 Name: "__rollup_type__", 1752 Value: "counter", 1753 }, 1754 }, 1755 }, 1756 }, 1757 }, 1758 ingest: &testDownsamplerOptionsIngest{ 1759 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1760 }, 1761 expect: &testDownsamplerOptionsExpect{ 1762 writes: []testExpectedWrite{ 1763 { 1764 tags: map[string]string{ 1765 nameTag: "http_requests_by_status_code", 1766 string(rollupTagName): string(rollupTagValue), 1767 "__rollup_type__": "counter", 1768 "app": "nginx_edge", 1769 "status_code": "500", 1770 "endpoint": "/foo/bar", 1771 }, 1772 values: []expectedValue{{value: 22}}, 1773 attributes: &storagemetadata.Attributes{ 1774 MetricsType: storagemetadata.AggregatedMetricsType, 1775 Resolution: res, 1776 Retention: ret, 1777 }, 1778 }, 1779 }, 1780 }, 1781 }) 1782 1783 // Test expected output 1784 testDownsamplerAggregation(t, testDownsampler) 1785 } 1786 1787 // TestDownsamplerAggregationWithRulesConfigRollupRulesAggregateTransformNoRollup 1788 // tests that rollup rules can be used to actually simply transform values 1789 // without the need for an explicit rollup step. 1790 func TestDownsamplerAggregationWithRulesConfigRollupRulesAggregateTransformNoRollup(t *testing.T) { 1791 t.Parallel() 1792 1793 gaugeMetric := testGaugeMetric{ 1794 tags: map[string]string{ 1795 nameTag: "http_requests", 1796 "app": "nginx_edge", 1797 "status_code": "500", 1798 "endpoint": "/foo/bar", 1799 "not_rolled_up": "not_rolled_up_value", 1800 }, 1801 timedSamples: []testGaugeMetricTimedSample{ 1802 {value: 42}, 1803 {value: 64}, 1804 }, 1805 } 1806 res := 5 * time.Second 1807 ret := 30 * 24 * time.Hour 1808 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1809 rulesConfig: &RulesConfiguration{ 1810 RollupRules: []RollupRuleConfiguration{ 1811 { 1812 Filter: fmt.Sprintf( 1813 "%s:http_requests app:* status_code:* endpoint:*", 1814 nameTag), 1815 Transforms: []TransformConfiguration{ 1816 { 1817 Aggregate: &AggregateOperationConfiguration{ 1818 Type: aggregation.Sum, 1819 }, 1820 }, 1821 { 1822 Transform: &TransformOperationConfiguration{ 1823 Type: transformation.Add, 1824 }, 1825 }, 1826 }, 1827 StoragePolicies: []StoragePolicyConfiguration{ 1828 { 1829 Resolution: res, 1830 Retention: ret, 1831 }, 1832 }, 1833 }, 1834 }, 1835 }, 1836 ingest: &testDownsamplerOptionsIngest{ 1837 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 1838 }, 1839 expect: &testDownsamplerOptionsExpect{ 1840 writes: []testExpectedWrite{ 1841 { 1842 tags: map[string]string{ 1843 nameTag: "http_requests", 1844 "app": "nginx_edge", 1845 "status_code": "500", 1846 "endpoint": "/foo/bar", 1847 "not_rolled_up": "not_rolled_up_value", 1848 }, 1849 values: []expectedValue{{value: 106}}, 1850 attributes: &storagemetadata.Attributes{ 1851 MetricsType: storagemetadata.AggregatedMetricsType, 1852 Resolution: res, 1853 Retention: ret, 1854 }, 1855 }, 1856 }, 1857 }, 1858 }) 1859 1860 // Test expected output 1861 testDownsamplerAggregation(t, testDownsampler) 1862 } 1863 1864 func TestDownsamplerAggregationWithRulesConfigRollupRulesIncreaseAdd(t *testing.T) { 1865 t.Parallel() 1866 1867 // nolint:dupl 1868 gaugeMetrics := []testGaugeMetric{ 1869 { 1870 tags: map[string]string{ 1871 nameTag: "http_requests", 1872 "app": "nginx_edge", 1873 "status_code": "500", 1874 "endpoint": "/foo/bar", 1875 "not_rolled_up": "not_rolled_up_value_1", 1876 }, 1877 timedSamples: []testGaugeMetricTimedSample{ 1878 {value: 42, offset: 1 * time.Second}, // +42 1879 {value: 12, offset: 2 * time.Second}, // +12 - simulate a reset (should count as a 0) 1880 {value: 33, offset: 3 * time.Second}, // +21 1881 }, 1882 }, 1883 { 1884 tags: map[string]string{ 1885 nameTag: "http_requests", 1886 "app": "nginx_edge", 1887 "status_code": "500", 1888 "endpoint": "/foo/bar", 1889 "not_rolled_up": "not_rolled_up_value_2", 1890 }, 1891 timedSamples: []testGaugeMetricTimedSample{ 1892 {value: 13, offset: 1 * time.Second}, // +13 1893 {value: 27, offset: 2 * time.Second}, // +14 1894 {value: 42, offset: 3 * time.Second}, // +15 1895 }, 1896 }, 1897 } 1898 res := 1 * time.Second 1899 ret := 30 * 24 * time.Hour 1900 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1901 rulesConfig: &RulesConfiguration{ 1902 RollupRules: []RollupRuleConfiguration{ 1903 { 1904 Filter: fmt.Sprintf( 1905 "%s:http_requests app:* status_code:* endpoint:*", 1906 nameTag), 1907 Transforms: []TransformConfiguration{ 1908 { 1909 Transform: &TransformOperationConfiguration{ 1910 Type: transformation.Increase, 1911 }, 1912 }, 1913 { 1914 Rollup: &RollupOperationConfiguration{ 1915 MetricName: "http_requests_by_status_code", 1916 GroupBy: []string{"app", "status_code", "endpoint"}, 1917 Aggregations: []aggregation.Type{aggregation.Sum}, 1918 }, 1919 }, 1920 { 1921 Transform: &TransformOperationConfiguration{ 1922 Type: transformation.Add, 1923 }, 1924 }, 1925 }, 1926 StoragePolicies: []StoragePolicyConfiguration{ 1927 { 1928 Resolution: res, 1929 Retention: ret, 1930 }, 1931 }, 1932 }, 1933 }, 1934 }, 1935 ingest: &testDownsamplerOptionsIngest{ 1936 gaugeMetrics: gaugeMetrics, 1937 }, 1938 expect: &testDownsamplerOptionsExpect{ 1939 writes: []testExpectedWrite{ 1940 { 1941 tags: map[string]string{ 1942 nameTag: "http_requests_by_status_code", 1943 string(rollupTagName): string(rollupTagValue), 1944 "app": "nginx_edge", 1945 "status_code": "500", 1946 "endpoint": "/foo/bar", 1947 }, 1948 values: []expectedValue{ 1949 {value: 55}, 1950 {value: 69, offset: 1 * time.Second}, 1951 {value: 105, offset: 2 * time.Second}, 1952 }, 1953 attributes: &storagemetadata.Attributes{ 1954 MetricsType: storagemetadata.AggregatedMetricsType, 1955 Resolution: res, 1956 Retention: ret, 1957 }, 1958 }, 1959 }, 1960 }, 1961 }) 1962 1963 // Test expected output 1964 testDownsamplerAggregation(t, testDownsampler) 1965 } 1966 1967 func TestDownsamplerAggregationWithRulesConfigRollupRuleAndDropPolicy(t *testing.T) { 1968 t.Parallel() 1969 1970 gaugeMetric := testGaugeMetric{ 1971 tags: map[string]string{ 1972 nameTag: "http_requests", 1973 "app": "nginx_edge", 1974 "status_code": "500", 1975 "endpoint": "/foo/bar", 1976 "not_rolled_up": "not_rolled_up_value", 1977 }, 1978 timedSamples: []testGaugeMetricTimedSample{ 1979 {value: 42}, 1980 {value: 64, offset: 1 * time.Second}, 1981 }, 1982 expectDropPolicyApplied: true, 1983 } 1984 res := 1 * time.Second 1985 ret := 30 * 24 * time.Hour 1986 filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag) 1987 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 1988 rulesConfig: &RulesConfiguration{ 1989 MappingRules: []MappingRuleConfiguration{ 1990 { 1991 Filter: filter, 1992 Drop: true, 1993 }, 1994 }, 1995 RollupRules: []RollupRuleConfiguration{ 1996 { 1997 Filter: filter, 1998 Transforms: []TransformConfiguration{ 1999 { 2000 Transform: &TransformOperationConfiguration{ 2001 Type: transformation.PerSecond, 2002 }, 2003 }, 2004 { 2005 Rollup: &RollupOperationConfiguration{ 2006 MetricName: "http_requests_by_status_code", 2007 GroupBy: []string{"app", "status_code", "endpoint"}, 2008 Aggregations: []aggregation.Type{aggregation.Sum}, 2009 }, 2010 }, 2011 }, 2012 StoragePolicies: []StoragePolicyConfiguration{ 2013 { 2014 Resolution: res, 2015 Retention: ret, 2016 }, 2017 }, 2018 }, 2019 }, 2020 }, 2021 ingest: &testDownsamplerOptionsIngest{ 2022 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 2023 }, 2024 expect: &testDownsamplerOptionsExpect{ 2025 writes: []testExpectedWrite{ 2026 { 2027 tags: map[string]string{ 2028 nameTag: "http_requests_by_status_code", 2029 string(rollupTagName): string(rollupTagValue), 2030 "app": "nginx_edge", 2031 "status_code": "500", 2032 "endpoint": "/foo/bar", 2033 }, 2034 values: []expectedValue{{value: 22}}, 2035 attributes: &storagemetadata.Attributes{ 2036 MetricsType: storagemetadata.AggregatedMetricsType, 2037 Resolution: res, 2038 Retention: ret, 2039 }, 2040 }, 2041 }, 2042 }, 2043 }) 2044 2045 // Test expected output 2046 testDownsamplerAggregation(t, testDownsampler) 2047 } 2048 2049 func TestDownsamplerAggregationWithRulesConfigRollupRuleAndDropPolicyAndDropTimestamp(t *testing.T) { 2050 t.Parallel() 2051 2052 gaugeMetrics := []testGaugeMetric{ 2053 { 2054 tags: map[string]string{ 2055 nameTag: "http_requests", 2056 "app": "nginx_edge", 2057 "status_code": "500", 2058 "endpoint": "/foo/bar", 2059 "not_rolled_up": "not_rolled_up_value_1", 2060 }, 2061 timedSamples: []testGaugeMetricTimedSample{ 2062 {value: 42}, // +42 (should not be accounted since is a reset) 2063 // Explicit no value. 2064 {value: 12, offset: 2 * time.Second}, // +12 - simulate a reset (should not be accounted) 2065 }, 2066 expectDropTimestamp: true, 2067 expectDropPolicyApplied: true, 2068 }, 2069 { 2070 tags: map[string]string{ 2071 nameTag: "http_requests", 2072 "app": "nginx_edge", 2073 "status_code": "500", 2074 "endpoint": "/foo/bar", 2075 "not_rolled_up": "not_rolled_up_value_2", 2076 }, 2077 timedSamples: []testGaugeMetricTimedSample{ 2078 {value: 13}, 2079 {value: 27, offset: 2 * time.Second}, // +14 2080 }, 2081 expectDropTimestamp: true, 2082 expectDropPolicyApplied: true, 2083 }, 2084 } 2085 tags := []Tag{ 2086 {Name: "__m3_drop_timestamp__", Value: ""}, 2087 } 2088 res := 1 * time.Second 2089 ret := 30 * 24 * time.Hour 2090 filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag) 2091 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2092 rulesConfig: &RulesConfiguration{ 2093 MappingRules: []MappingRuleConfiguration{ 2094 { 2095 Filter: filter, 2096 Drop: true, 2097 Tags: tags, 2098 }, 2099 }, 2100 RollupRules: []RollupRuleConfiguration{ 2101 { 2102 Filter: filter, 2103 Transforms: []TransformConfiguration{ 2104 { 2105 Rollup: &RollupOperationConfiguration{ 2106 MetricName: "http_requests_by_status_code", 2107 GroupBy: []string{"app", "status_code", "endpoint"}, 2108 Aggregations: []aggregation.Type{aggregation.Sum}, 2109 }, 2110 }, 2111 }, 2112 StoragePolicies: []StoragePolicyConfiguration{ 2113 { 2114 Resolution: res, 2115 Retention: ret, 2116 }, 2117 }, 2118 }, 2119 }, 2120 }, 2121 ingest: &testDownsamplerOptionsIngest{ 2122 gaugeMetrics: gaugeMetrics, 2123 }, 2124 expect: &testDownsamplerOptionsExpect{ 2125 writes: []testExpectedWrite{ 2126 { 2127 tags: map[string]string{ 2128 nameTag: "http_requests_by_status_code", 2129 string(rollupTagName): string(rollupTagValue), 2130 "app": "nginx_edge", 2131 "status_code": "500", 2132 "endpoint": "/foo/bar", 2133 }, 2134 values: []expectedValue{{value: 94}}, 2135 attributes: &storagemetadata.Attributes{ 2136 MetricsType: storagemetadata.AggregatedMetricsType, 2137 Resolution: res, 2138 Retention: ret, 2139 }, 2140 }, 2141 }, 2142 }, 2143 }) 2144 2145 // Test expected output 2146 testDownsamplerAggregation(t, testDownsampler) 2147 } 2148 2149 func TestDownsamplerAggregationWithRulesConfigRollupRuleUntimedRollups(t *testing.T) { 2150 t.Parallel() 2151 2152 gaugeMetrics := []testGaugeMetric{ 2153 { 2154 tags: map[string]string{ 2155 nameTag: "http_requests", 2156 "app": "nginx_edge", 2157 "status_code": "500", 2158 "endpoint": "/foo/bar", 2159 "not_rolled_up": "not_rolled_up_value_1", 2160 }, 2161 timedSamples: []testGaugeMetricTimedSample{ 2162 {value: 42}, 2163 {value: 12, offset: 2 * time.Second}, 2164 }, 2165 expectDropTimestamp: true, 2166 }, 2167 { 2168 tags: map[string]string{ 2169 nameTag: "http_requests", 2170 "app": "nginx_edge", 2171 "status_code": "500", 2172 "endpoint": "/foo/bar", 2173 "not_rolled_up": "not_rolled_up_value_2", 2174 }, 2175 timedSamples: []testGaugeMetricTimedSample{ 2176 {value: 13}, 2177 {value: 27, offset: 2 * time.Second}, 2178 }, 2179 expectDropTimestamp: true, 2180 }, 2181 } 2182 res := 1 * time.Second 2183 ret := 30 * 24 * time.Hour 2184 filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag) 2185 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2186 untimedRollups: true, 2187 rulesConfig: &RulesConfiguration{ 2188 MappingRules: []MappingRuleConfiguration{ 2189 { 2190 Filter: "app:nginx*", 2191 Aggregations: []aggregation.Type{aggregation.Max}, 2192 StoragePolicies: []StoragePolicyConfiguration{ 2193 { 2194 Resolution: 1 * time.Second, 2195 Retention: 30 * 24 * time.Hour, 2196 }, 2197 }, 2198 }, 2199 }, 2200 RollupRules: []RollupRuleConfiguration{ 2201 { 2202 Filter: filter, 2203 Transforms: []TransformConfiguration{ 2204 { 2205 Rollup: &RollupOperationConfiguration{ 2206 MetricName: "http_requests_by_status_code", 2207 GroupBy: []string{"app", "status_code", "endpoint"}, 2208 Aggregations: []aggregation.Type{aggregation.Sum}, 2209 }, 2210 }, 2211 }, 2212 StoragePolicies: []StoragePolicyConfiguration{ 2213 { 2214 Resolution: res, 2215 Retention: ret, 2216 }, 2217 }, 2218 }, 2219 }, 2220 }, 2221 ingest: &testDownsamplerOptionsIngest{ 2222 gaugeMetrics: gaugeMetrics, 2223 }, 2224 expect: &testDownsamplerOptionsExpect{ 2225 writes: []testExpectedWrite{ 2226 { 2227 tags: map[string]string{ 2228 nameTag: "http_requests_by_status_code", 2229 string(rollupTagName): string(rollupTagValue), 2230 "app": "nginx_edge", 2231 "status_code": "500", 2232 "endpoint": "/foo/bar", 2233 }, 2234 values: []expectedValue{{value: 94}}, 2235 attributes: &storagemetadata.Attributes{ 2236 MetricsType: storagemetadata.AggregatedMetricsType, 2237 Resolution: res, 2238 Retention: ret, 2239 }, 2240 }, 2241 }, 2242 }, 2243 }) 2244 2245 // Test expected output 2246 testDownsamplerAggregation(t, testDownsampler) 2247 } 2248 2249 func TestDownsamplerAggregationWithRulesConfigRollupRuleUntimedRollupsWaitForOffset(t *testing.T) { 2250 t.Parallel() 2251 2252 gaugeMetrics := []testGaugeMetric{ 2253 { 2254 tags: map[string]string{ 2255 nameTag: "http_requests", 2256 "app": "nginx_edge", 2257 "status_code": "500", 2258 "endpoint": "/foo/bar", 2259 "not_rolled_up": "not_rolled_up_value_1", 2260 }, 2261 timedSamples: []testGaugeMetricTimedSample{ 2262 {value: 42}, 2263 }, 2264 expectDropPolicyApplied: true, 2265 expectDropTimestamp: true, 2266 }, 2267 { 2268 tags: map[string]string{ 2269 nameTag: "http_requests", 2270 "app": "nginx_edge", 2271 "status_code": "500", 2272 "endpoint": "/foo/bar", 2273 "not_rolled_up": "not_rolled_up_value_2", 2274 }, 2275 timedSamples: []testGaugeMetricTimedSample{ 2276 {value: 12, offset: 2 * time.Second}, 2277 }, 2278 expectDropPolicyApplied: true, 2279 expectDropTimestamp: true, 2280 }, 2281 { 2282 tags: map[string]string{ 2283 nameTag: "http_requests", 2284 "app": "nginx_edge", 2285 "status_code": "500", 2286 "endpoint": "/foo/bar", 2287 "not_rolled_up": "not_rolled_up_value_3", 2288 }, 2289 timedSamples: []testGaugeMetricTimedSample{ 2290 {value: 13}, 2291 }, 2292 expectDropPolicyApplied: true, 2293 expectDropTimestamp: true, 2294 }, 2295 } 2296 res := 1 * time.Second 2297 ret := 30 * 24 * time.Hour 2298 filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag) 2299 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2300 waitForOffset: true, 2301 untimedRollups: true, 2302 rulesConfig: &RulesConfiguration{ 2303 MappingRules: []MappingRuleConfiguration{ 2304 { 2305 Filter: filter, 2306 Drop: true, 2307 }, 2308 }, 2309 RollupRules: []RollupRuleConfiguration{ 2310 { 2311 Filter: filter, 2312 Transforms: []TransformConfiguration{ 2313 { 2314 Rollup: &RollupOperationConfiguration{ 2315 MetricName: "http_requests_by_status_code", 2316 GroupBy: []string{"app", "status_code", "endpoint"}, 2317 Aggregations: []aggregation.Type{aggregation.Sum}, 2318 }, 2319 }, 2320 }, 2321 StoragePolicies: []StoragePolicyConfiguration{ 2322 { 2323 Resolution: res, 2324 Retention: ret, 2325 }, 2326 }, 2327 }, 2328 }, 2329 }, 2330 ingest: &testDownsamplerOptionsIngest{ 2331 gaugeMetrics: gaugeMetrics, 2332 }, 2333 expect: &testDownsamplerOptionsExpect{ 2334 writes: []testExpectedWrite{ 2335 { 2336 tags: map[string]string{ 2337 nameTag: "http_requests_by_status_code", 2338 string(rollupTagName): string(rollupTagValue), 2339 "app": "nginx_edge", 2340 "status_code": "500", 2341 "endpoint": "/foo/bar", 2342 }, 2343 values: []expectedValue{{value: 42}, {value: 25}}, 2344 attributes: &storagemetadata.Attributes{ 2345 MetricsType: storagemetadata.AggregatedMetricsType, 2346 Resolution: res, 2347 Retention: ret, 2348 }, 2349 }, 2350 }, 2351 }, 2352 }) 2353 2354 // Test expected output 2355 testDownsamplerAggregation(t, testDownsampler) 2356 } 2357 2358 func TestDownsamplerAggregationWithRulesConfigRollupRuleRollupLaterUntimedRollups(t *testing.T) { 2359 t.Parallel() 2360 2361 gaugeMetrics := []testGaugeMetric{ 2362 { 2363 tags: map[string]string{ 2364 nameTag: "http_requests", 2365 "app": "nginx_edge", 2366 "status_code": "500", 2367 "endpoint": "/foo/bar", 2368 "not_rolled_up": "not_rolled_up_value_1", 2369 }, 2370 timedSamples: []testGaugeMetricTimedSample{ 2371 {value: 42}, 2372 {value: 12, offset: 2 * time.Second}, 2373 }, 2374 expectDropTimestamp: true, 2375 }, 2376 { 2377 tags: map[string]string{ 2378 nameTag: "http_requests", 2379 "app": "nginx_edge", 2380 "status_code": "500", 2381 "endpoint": "/foo/bar", 2382 "not_rolled_up": "not_rolled_up_value_2", 2383 }, 2384 timedSamples: []testGaugeMetricTimedSample{ 2385 {value: 13}, 2386 {value: 27, offset: 2 * time.Second}, 2387 }, 2388 expectDropTimestamp: true, 2389 }, 2390 } 2391 res := 1 * time.Second 2392 ret := 30 * 24 * time.Hour 2393 filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag) 2394 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2395 untimedRollups: true, 2396 rulesConfig: &RulesConfiguration{ 2397 MappingRules: []MappingRuleConfiguration{ 2398 { 2399 Filter: "app:nginx*", 2400 Aggregations: []aggregation.Type{aggregation.Max}, 2401 StoragePolicies: []StoragePolicyConfiguration{ 2402 { 2403 Resolution: 1 * time.Second, 2404 Retention: 30 * 24 * time.Hour, 2405 }, 2406 }, 2407 }, 2408 }, 2409 RollupRules: []RollupRuleConfiguration{ 2410 { 2411 Filter: filter, 2412 Transforms: []TransformConfiguration{ 2413 { 2414 Transform: &TransformOperationConfiguration{ 2415 Type: transformation.Add, 2416 }, 2417 }, 2418 { 2419 Rollup: &RollupOperationConfiguration{ 2420 MetricName: "http_requests_by_status_code", 2421 GroupBy: []string{"app", "status_code", "endpoint"}, 2422 Aggregations: []aggregation.Type{aggregation.Sum}, 2423 }, 2424 }, 2425 }, 2426 StoragePolicies: []StoragePolicyConfiguration{ 2427 { 2428 Resolution: res, 2429 Retention: ret, 2430 }, 2431 }, 2432 }, 2433 }, 2434 }, 2435 ingest: &testDownsamplerOptionsIngest{ 2436 gaugeMetrics: gaugeMetrics, 2437 }, 2438 expect: &testDownsamplerOptionsExpect{ 2439 writes: []testExpectedWrite{ 2440 { 2441 tags: map[string]string{ 2442 nameTag: "http_requests_by_status_code", 2443 string(rollupTagName): string(rollupTagValue), 2444 "app": "nginx_edge", 2445 "status_code": "500", 2446 "endpoint": "/foo/bar", 2447 }, 2448 values: []expectedValue{{value: 39}}, 2449 attributes: &storagemetadata.Attributes{ 2450 MetricsType: storagemetadata.AggregatedMetricsType, 2451 Resolution: res, 2452 Retention: ret, 2453 }, 2454 }, 2455 }, 2456 }, 2457 }) 2458 2459 // Test expected output 2460 testDownsamplerAggregation(t, testDownsampler) 2461 } 2462 2463 func TestDownsamplerAggregationWithRulesConfigRollupRulesExcludeByLastMean(t *testing.T) { 2464 t.Parallel() 2465 2466 gaugeMetrics := []testGaugeMetric{ 2467 { 2468 tags: map[string]string{ 2469 nameTag: "http_request_latency_max_gauge", 2470 "app": "nginx_edge", 2471 "status_code": "500", 2472 "endpoint": "/foo/bar", 2473 "instance": "not_rolled_up_instance_1", 2474 }, 2475 timedSamples: []testGaugeMetricTimedSample{ 2476 {value: 42}, 2477 }, 2478 }, 2479 { 2480 tags: map[string]string{ 2481 nameTag: "http_request_latency_max_gauge", 2482 "app": "nginx_edge", 2483 "status_code": "500", 2484 "endpoint": "/foo/bar", 2485 "instance": "not_rolled_up_instance_2", 2486 }, 2487 timedSamples: []testGaugeMetricTimedSample{ 2488 {value: 13}, 2489 }, 2490 }, 2491 } 2492 res := 1 * time.Second 2493 ret := 30 * 24 * time.Hour 2494 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2495 rulesConfig: &RulesConfiguration{ 2496 RollupRules: []RollupRuleConfiguration{ 2497 { 2498 Filter: fmt.Sprintf( 2499 "%s:http_request_latency_max_gauge app:* status_code:* endpoint:*", 2500 nameTag), 2501 Transforms: []TransformConfiguration{ 2502 { 2503 Aggregate: &AggregateOperationConfiguration{ 2504 Type: aggregation.Last, 2505 }, 2506 }, 2507 { 2508 Rollup: &RollupOperationConfiguration{ 2509 MetricName: "{{ .MetricName }}:mean_without_instance", 2510 ExcludeBy: []string{"instance"}, 2511 Aggregations: []aggregation.Type{aggregation.Mean}, 2512 }, 2513 }, 2514 }, 2515 StoragePolicies: []StoragePolicyConfiguration{ 2516 { 2517 Resolution: res, 2518 Retention: ret, 2519 }, 2520 }, 2521 }, 2522 }, 2523 }, 2524 ingest: &testDownsamplerOptionsIngest{ 2525 gaugeMetrics: gaugeMetrics, 2526 }, 2527 expect: &testDownsamplerOptionsExpect{ 2528 writes: []testExpectedWrite{ 2529 { 2530 tags: map[string]string{ 2531 nameTag: "http_request_latency_max_gauge:mean_without_instance", 2532 string(rollupTagName): string(rollupTagValue), 2533 "app": "nginx_edge", 2534 "status_code": "500", 2535 "endpoint": "/foo/bar", 2536 }, 2537 values: []expectedValue{ 2538 {value: 27.5}, 2539 }, 2540 attributes: &storagemetadata.Attributes{ 2541 MetricsType: storagemetadata.AggregatedMetricsType, 2542 Resolution: res, 2543 Retention: ret, 2544 }, 2545 }, 2546 }, 2547 }, 2548 }) 2549 2550 // Test expected output 2551 testDownsamplerAggregation(t, testDownsampler) 2552 } 2553 2554 func TestDownsamplerAggregationWithRulesConfigRollupRulesExcludeByIncreaseSumAdd(t *testing.T) { 2555 t.Parallel() 2556 2557 // nolint:dupl 2558 gaugeMetrics := []testGaugeMetric{ 2559 { 2560 tags: map[string]string{ 2561 nameTag: "http_requests", 2562 "app": "nginx_edge", 2563 "status_code": "500", 2564 "endpoint": "/foo/bar", 2565 "instance": "not_rolled_up_instance_1", 2566 }, 2567 timedSamples: []testGaugeMetricTimedSample{ 2568 {value: 42, offset: 1 * time.Second}, // +42 2569 {value: 12, offset: 2 * time.Second}, // +12 - simulate a reset (should count as a 0) 2570 {value: 33, offset: 3 * time.Second}, // +21 2571 }, 2572 }, 2573 { 2574 tags: map[string]string{ 2575 nameTag: "http_requests", 2576 "app": "nginx_edge", 2577 "status_code": "500", 2578 "endpoint": "/foo/bar", 2579 "instance": "not_rolled_up_instance_2", 2580 }, 2581 timedSamples: []testGaugeMetricTimedSample{ 2582 {value: 13, offset: 1 * time.Second}, // +13 2583 {value: 27, offset: 2 * time.Second}, // +14 2584 {value: 42, offset: 3 * time.Second}, // +15 2585 }, 2586 }, 2587 } 2588 res := 1 * time.Second 2589 ret := 30 * 24 * time.Hour 2590 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2591 rulesConfig: &RulesConfiguration{ 2592 RollupRules: []RollupRuleConfiguration{ 2593 { 2594 Filter: fmt.Sprintf( 2595 "%s:http_requests app:* status_code:* endpoint:*", 2596 nameTag), 2597 Transforms: []TransformConfiguration{ 2598 { 2599 Transform: &TransformOperationConfiguration{ 2600 Type: transformation.Increase, 2601 }, 2602 }, 2603 { 2604 Rollup: &RollupOperationConfiguration{ 2605 MetricName: "{{ .MetricName }}:sum_without_instance", 2606 ExcludeBy: []string{"instance"}, 2607 Aggregations: []aggregation.Type{aggregation.Sum}, 2608 }, 2609 }, 2610 { 2611 Transform: &TransformOperationConfiguration{ 2612 Type: transformation.Add, 2613 }, 2614 }, 2615 }, 2616 StoragePolicies: []StoragePolicyConfiguration{ 2617 { 2618 Resolution: res, 2619 Retention: ret, 2620 }, 2621 }, 2622 }, 2623 }, 2624 }, 2625 ingest: &testDownsamplerOptionsIngest{ 2626 gaugeMetrics: gaugeMetrics, 2627 }, 2628 expect: &testDownsamplerOptionsExpect{ 2629 writes: []testExpectedWrite{ 2630 { 2631 tags: map[string]string{ 2632 nameTag: "http_requests:sum_without_instance", 2633 string(rollupTagName): string(rollupTagValue), 2634 "app": "nginx_edge", 2635 "status_code": "500", 2636 "endpoint": "/foo/bar", 2637 }, 2638 values: []expectedValue{ 2639 {value: 55}, 2640 {value: 69, offset: 1 * time.Second}, 2641 {value: 105, offset: 2 * time.Second}, 2642 }, 2643 attributes: &storagemetadata.Attributes{ 2644 MetricsType: storagemetadata.AggregatedMetricsType, 2645 Resolution: res, 2646 Retention: ret, 2647 }, 2648 }, 2649 }, 2650 }, 2651 }) 2652 2653 // Test expected output 2654 testDownsamplerAggregation(t, testDownsampler) 2655 } 2656 2657 func TestDownsamplerAggregationWithTimedSamples(t *testing.T) { 2658 counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{ 2659 timedSamples: true, 2660 }) 2661 gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{ 2662 timedSamples: true, 2663 }) 2664 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2665 ingest: &testDownsamplerOptionsIngest{ 2666 counterMetrics: counterMetrics, 2667 gaugeMetrics: gaugeMetrics, 2668 }, 2669 expect: &testDownsamplerOptionsExpect{ 2670 writes: append(counterMetricsExpect, gaugeMetricsExpect...), 2671 }, 2672 rulesConfig: &RulesConfiguration{ 2673 MappingRules: []MappingRuleConfiguration{ 2674 { 2675 Filter: "__name__:*", 2676 Aggregations: []aggregation.Type{testAggregationType}, 2677 StoragePolicies: []StoragePolicyConfiguration{ 2678 { 2679 Resolution: 2 * time.Second, 2680 Retention: 24 * time.Hour, 2681 }, 2682 }, 2683 }, 2684 }, 2685 }, 2686 }) 2687 2688 // Test expected output 2689 testDownsamplerAggregation(t, testDownsampler) 2690 } 2691 2692 func TestDownsamplerAggregationWithOverrideRules(t *testing.T) { 2693 counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{}) 2694 counterMetricsExpect[0].values = []expectedValue{{value: 2}} 2695 2696 gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{}) 2697 gaugeMetricsExpect[0].values = []expectedValue{{value: 5}} 2698 2699 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2700 sampleAppenderOpts: &SampleAppenderOptions{ 2701 Override: true, 2702 OverrideRules: SamplesAppenderOverrideRules{ 2703 MappingRules: []AutoMappingRule{ 2704 { 2705 Aggregations: []aggregation.Type{aggregation.Mean}, 2706 Policies: []policy.StoragePolicy{ 2707 policy.MustParseStoragePolicy("4s:1d"), 2708 }, 2709 }, 2710 }, 2711 }, 2712 }, 2713 rulesConfig: &RulesConfiguration{ 2714 MappingRules: []MappingRuleConfiguration{ 2715 { 2716 Filter: "__name__:*", 2717 Aggregations: []aggregation.Type{testAggregationType}, 2718 StoragePolicies: []StoragePolicyConfiguration{ 2719 { 2720 Resolution: 2 * time.Second, 2721 Retention: 24 * time.Hour, 2722 }, 2723 }, 2724 }, 2725 }, 2726 }, 2727 ingest: &testDownsamplerOptionsIngest{ 2728 counterMetrics: counterMetrics, 2729 gaugeMetrics: gaugeMetrics, 2730 }, 2731 expect: &testDownsamplerOptionsExpect{ 2732 writes: append(counterMetricsExpect, gaugeMetricsExpect...), 2733 }, 2734 }) 2735 2736 // Test expected output 2737 testDownsamplerAggregation(t, testDownsampler) 2738 } 2739 2740 func TestDownsamplerAggregationWithRemoteAggregatorClient(t *testing.T) { 2741 ctrl := xtest.NewController(t) 2742 defer ctrl.Finish() 2743 2744 // Create mock client 2745 remoteClientMock := client.NewMockClient(ctrl) 2746 remoteClientMock.EXPECT().Init().Return(nil) 2747 2748 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2749 rulesConfig: &RulesConfiguration{ 2750 MappingRules: []MappingRuleConfiguration{ 2751 { 2752 Filter: "__name__:*", 2753 Aggregations: []aggregation.Type{testAggregationType}, 2754 StoragePolicies: []StoragePolicyConfiguration{ 2755 { 2756 Resolution: 2 * time.Second, 2757 Retention: 24 * time.Hour, 2758 }, 2759 }, 2760 }, 2761 }, 2762 }, 2763 remoteClientMock: remoteClientMock, 2764 }) 2765 2766 // Test expected output 2767 testDownsamplerRemoteAggregation(t, testDownsampler) 2768 } 2769 2770 func TestDownsamplerWithOverrideNamespace(t *testing.T) { 2771 overrideNamespaceTag := "override_namespace_tag" 2772 2773 gaugeMetric := testGaugeMetric{ 2774 tags: map[string]string{ 2775 nameTag: "http_requests", 2776 "app": "nginx_edge", 2777 "status_code": "500", 2778 "endpoint": "/foo/bar", 2779 "not_rolled_up": "not_rolled_up_value", 2780 // Set namespace tags on ingested metrics. 2781 // The test demonstrates that overrideNamespaceTag is respected, meaning setting 2782 // values on defaultNamespaceTag won't affect aggregation. 2783 defaultNamespaceTag: "namespace_ignored", 2784 }, 2785 timedSamples: []testGaugeMetricTimedSample{ 2786 {value: 42}, 2787 {value: 64, offset: 5 * time.Second}, 2788 }, 2789 } 2790 res := 5 * time.Second 2791 ret := 30 * 24 * time.Hour 2792 testDownsampler := newTestDownsampler(t, testDownsamplerOptions{ 2793 rulesConfig: &RulesConfiguration{ 2794 RollupRules: []RollupRuleConfiguration{ 2795 { 2796 Filter: fmt.Sprintf( 2797 "%s:http_requests app:* status_code:* endpoint:*", 2798 nameTag), 2799 Transforms: []TransformConfiguration{ 2800 { 2801 Transform: &TransformOperationConfiguration{ 2802 Type: transformation.PerSecond, 2803 }, 2804 }, 2805 { 2806 Rollup: &RollupOperationConfiguration{ 2807 MetricName: "http_requests_by_status_code", 2808 GroupBy: []string{"app", "status_code", "endpoint"}, 2809 Aggregations: []aggregation.Type{aggregation.Sum}, 2810 }, 2811 }, 2812 }, 2813 StoragePolicies: []StoragePolicyConfiguration{ 2814 { 2815 Resolution: res, 2816 Retention: ret, 2817 }, 2818 }, 2819 }, 2820 }, 2821 }, 2822 matcherConfig: MatcherConfiguration{NamespaceTag: overrideNamespaceTag}, 2823 ingest: &testDownsamplerOptionsIngest{ 2824 gaugeMetrics: []testGaugeMetric{gaugeMetric}, 2825 }, 2826 expect: &testDownsamplerOptionsExpect{ 2827 writes: []testExpectedWrite{ 2828 { 2829 tags: map[string]string{ 2830 nameTag: "http_requests_by_status_code", 2831 string(rollupTagName): string(rollupTagValue), 2832 "app": "nginx_edge", 2833 "status_code": "500", 2834 "endpoint": "/foo/bar", 2835 }, 2836 values: []expectedValue{{value: 4.4}}, 2837 attributes: &storagemetadata.Attributes{ 2838 MetricsType: storagemetadata.AggregatedMetricsType, 2839 Resolution: res, 2840 Retention: ret, 2841 }, 2842 }, 2843 }, 2844 }, 2845 }) 2846 2847 // Test expected output 2848 testDownsamplerAggregation(t, testDownsampler) 2849 } 2850 2851 func TestSafeguardInProcessDownsampler(t *testing.T) { 2852 ctrl := gomock.NewController(t) 2853 defer ctrl.Finish() 2854 2855 store := kv.NewMockStore(ctrl) 2856 store.EXPECT().SetIfNotExists(gomock.Eq(matcher.NewOptions().NamespacesKey()), gomock.Any()).Return(0, nil) 2857 2858 // explicitly asserting that no more mutations are done for original store. 2859 store.EXPECT().Set(gomock.Any(), gomock.Any()).Times(0) 2860 store.EXPECT().CheckAndSet(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) 2861 store.EXPECT().Delete(gomock.Any()).Times(0) 2862 2863 _ = newTestDownsampler(t, testDownsamplerOptions{ 2864 remoteClientMock: nil, 2865 kvStore: store, 2866 }) 2867 } 2868 2869 func TestDownsamplerNamespacesEtcdInit(t *testing.T) { 2870 t.Run("does not reset namespaces key", func(t *testing.T) { 2871 store := mem.NewStore() 2872 initialNamespaces := rulepb.Namespaces{Namespaces: []*rulepb.Namespace{{Name: "testNamespace"}}} 2873 _, err := store.Set(matcher.NewOptions().NamespacesKey(), &initialNamespaces) 2874 require.NoError(t, err) 2875 2876 _ = newTestDownsampler(t, testDownsamplerOptions{kvStore: store}) 2877 2878 assert.Equal(t, initialNamespaces, readNamespacesKey(t, store), 1) 2879 }) 2880 2881 t.Run("initializes namespace key", func(t *testing.T) { 2882 store := mem.NewStore() 2883 2884 _ = newTestDownsampler(t, testDownsamplerOptions{kvStore: store}) 2885 2886 ns := readNamespacesKey(t, store) 2887 require.NotNil(t, ns) 2888 assert.Len(t, ns.Namespaces, 0) 2889 }) 2890 2891 t.Run("does not initialize namespaces key when RequireNamespaceWatchOnInit is true", func(t *testing.T) { 2892 store := mem.NewStore() 2893 2894 matcherConfig := MatcherConfiguration{RequireNamespaceWatchOnInit: true} 2895 _ = newTestDownsampler(t, testDownsamplerOptions{kvStore: store, matcherConfig: matcherConfig}) 2896 2897 _, err := store.Get(matcher.NewOptions().NamespacesKey()) 2898 require.Error(t, err) 2899 }) 2900 } 2901 2902 func originalStagedMetadata(t *testing.T, testDownsampler testDownsampler) []metricpb.StagedMetadatas { 2903 ds, ok := testDownsampler.downsampler.(*downsampler) 2904 require.True(t, ok) 2905 2906 origStagedMetadata := ds.metricsAppenderOpts.defaultStagedMetadatasProtos 2907 return origStagedMetadata 2908 } 2909 2910 func waitForStagedMetadataUpdate(t *testing.T, testDownsampler testDownsampler, origStagedMetadata []metricpb.StagedMetadatas) { 2911 ds, ok := testDownsampler.downsampler.(*downsampler) 2912 require.True(t, ok) 2913 2914 require.True(t, clock.WaitUntil(func() bool { 2915 ds.RLock() 2916 defer ds.RUnlock() 2917 2918 return !assert.ObjectsAreEqual(origStagedMetadata, ds.metricsAppenderOpts.defaultStagedMetadatasProtos) 2919 }, time.Second)) 2920 } 2921 2922 func waitForEnabledUpdate(t *testing.T, testDownsampler *testDownsampler, current bool) { 2923 ds, ok := testDownsampler.downsampler.(*downsampler) 2924 require.True(t, ok) 2925 2926 require.True(t, clock.WaitUntil(func() bool { 2927 ds.RLock() 2928 defer ds.RUnlock() 2929 2930 return current != ds.enabled 2931 }, time.Second)) 2932 } 2933 2934 type testExpectedWrite struct { 2935 tags map[string]string 2936 values []expectedValue // use values for multi expected values 2937 valueAllowedError float64 // use for allowing for slightly inexact values due to timing, etc 2938 attributes *storagemetadata.Attributes 2939 } 2940 2941 type expectedValue struct { 2942 offset time.Duration 2943 value float64 2944 } 2945 2946 type testCounterMetric struct { 2947 tags map[string]string 2948 samples []int64 2949 timedSamples []testCounterMetricTimedSample 2950 expectDropPolicyApplied bool 2951 expectDropTimestamp bool 2952 } 2953 2954 type testCounterMetricTimedSample struct { 2955 time xtime.UnixNano 2956 offset time.Duration 2957 value int64 2958 } 2959 2960 type testGaugeMetric struct { 2961 tags map[string]string 2962 samples []float64 2963 timedSamples []testGaugeMetricTimedSample 2964 expectDropPolicyApplied bool 2965 expectDropTimestamp bool 2966 } 2967 2968 type testGaugeMetricTimedSample struct { 2969 time xtime.UnixNano 2970 offset time.Duration 2971 value float64 2972 } 2973 2974 type testTimerMetric struct { 2975 tags map[string]string 2976 samples []float64 2977 timedSamples []testTimerMetricTimedSample 2978 expectDropPolicyApplied bool 2979 expectDropTimestamp bool 2980 } 2981 2982 type testTimerMetricTimedSample struct { 2983 time xtime.UnixNano 2984 offset time.Duration 2985 value float64 2986 } 2987 2988 type testCounterMetricsOptions struct { 2989 timedSamples bool 2990 } 2991 2992 func testCounterMetrics(opts testCounterMetricsOptions) ( 2993 []testCounterMetric, 2994 []testExpectedWrite, 2995 ) { 2996 metric := testCounterMetric{ 2997 tags: map[string]string{nameTag: "counter0", "app": "testapp", "foo": "bar"}, 2998 samples: []int64{1, 2, 3}, 2999 } 3000 if opts.timedSamples { 3001 metric.samples = nil 3002 metric.timedSamples = []testCounterMetricTimedSample{ 3003 {value: 1}, {value: 2}, {value: 3}, 3004 } 3005 } 3006 write := testExpectedWrite{ 3007 tags: metric.tags, 3008 values: []expectedValue{{value: 6}}, 3009 } 3010 return []testCounterMetric{metric}, []testExpectedWrite{write} 3011 } 3012 3013 type testGaugeMetricsOptions struct { 3014 timedSamples bool 3015 } 3016 3017 func testGaugeMetrics(opts testGaugeMetricsOptions) ([]testGaugeMetric, []testExpectedWrite) { 3018 metric := testGaugeMetric{ 3019 tags: map[string]string{nameTag: "gauge0", "app": "testapp", "qux": "qaz"}, 3020 samples: []float64{4, 5, 6}, 3021 } 3022 if opts.timedSamples { 3023 metric.samples = nil 3024 metric.timedSamples = []testGaugeMetricTimedSample{ 3025 {value: 4}, 3026 {value: 5}, 3027 {value: 6, offset: 1 * time.Nanosecond}, 3028 } 3029 } 3030 write := testExpectedWrite{ 3031 tags: metric.tags, 3032 values: []expectedValue{{value: 15}}, 3033 } 3034 return []testGaugeMetric{metric}, []testExpectedWrite{write} 3035 } 3036 3037 func testDownsamplerAggregation( 3038 t *testing.T, 3039 testDownsampler testDownsampler, 3040 ) { 3041 testOpts := testDownsampler.testOpts 3042 3043 logger := testDownsampler.instrumentOpts.Logger(). 3044 With(zap.String("test", t.Name())) 3045 3046 counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{}) 3047 gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{}) 3048 expectedWrites := append(counterMetricsExpect, gaugeMetricsExpect...) 3049 3050 // Allow overrides 3051 var ( 3052 allowFilter *testDownsamplerOptionsExpectAllowFilter 3053 timerMetrics []testTimerMetric 3054 ) 3055 if ingest := testOpts.ingest; ingest != nil { 3056 counterMetrics = ingest.counterMetrics 3057 gaugeMetrics = ingest.gaugeMetrics 3058 timerMetrics = ingest.timerMetrics 3059 } 3060 if expect := testOpts.expect; expect != nil { 3061 expectedWrites = expect.writes 3062 allowFilter = expect.allowFilter 3063 } 3064 3065 // Ingest points 3066 testDownsamplerAggregationIngest(t, testDownsampler, 3067 counterMetrics, gaugeMetrics, timerMetrics) 3068 3069 // Wait for writes 3070 logger.Info("wait for test metrics to appear") 3071 logWritesAccumulated := os.Getenv("TEST_LOG_WRITES_ACCUMULATED") == "true" 3072 logWritesAccumulatedTicker := time.NewTicker(time.Second) 3073 3074 logWritesMatch := os.Getenv("TEST_LOG_WRITES_MATCH") == "true" 3075 logWritesMatchTicker := time.NewTicker(time.Second) 3076 3077 identTag := nameTag 3078 if len(testDownsampler.testOpts.identTag) > 0 { 3079 identTag = testDownsampler.testOpts.identTag 3080 } 3081 3082 CheckAllWritesArrivedLoop: 3083 for { 3084 allWrites := testDownsampler.storage.Writes() 3085 if logWritesAccumulated { 3086 select { 3087 case <-logWritesAccumulatedTicker.C: 3088 logger.Info("logging accmulated writes", 3089 zap.Int("numAllWrites", len(allWrites))) 3090 for _, write := range allWrites { 3091 logger.Info("accumulated write", 3092 zap.ByteString("tags", write.Tags().ID()), 3093 zap.Any("datapoints", write.Datapoints()), 3094 zap.Any("attributes", write.Attributes())) 3095 } 3096 default: 3097 } 3098 } 3099 3100 for _, expectedWrite := range expectedWrites { 3101 name := expectedWrite.tags[identTag] 3102 attrs := expectedWrite.attributes 3103 writesForNameAndAttrs, _ := findWrites(allWrites, name, identTag, attrs) 3104 if len(writesForNameAndAttrs) != len(expectedWrite.values) { 3105 if logWritesMatch { 3106 select { 3107 case <-logWritesMatchTicker.C: 3108 logger.Info("continuing wait for accumulated writes", 3109 zap.String("name", name), 3110 zap.Any("attributes", attrs), 3111 zap.Int("numWritesForNameAndAttrs", len(writesForNameAndAttrs)), 3112 zap.Int("numExpectedWriteValues", len(expectedWrite.values)), 3113 ) 3114 default: 3115 } 3116 } 3117 3118 time.Sleep(100 * time.Millisecond) 3119 continue CheckAllWritesArrivedLoop 3120 } 3121 } 3122 break 3123 } 3124 3125 // Verify writes 3126 logger.Info("verify test metrics") 3127 allWrites := testDownsampler.storage.Writes() 3128 if logWritesAccumulated { 3129 logger.Info("logging accmulated writes to verify", 3130 zap.Int("numAllWrites", len(allWrites))) 3131 for _, write := range allWrites { 3132 logger.Info("accumulated write", 3133 zap.ByteString("tags", write.Tags().ID()), 3134 zap.Any("datapoints", write.Datapoints())) 3135 } 3136 } 3137 3138 for _, expectedWrite := range expectedWrites { 3139 name := expectedWrite.tags[identTag] 3140 expectedValues := expectedWrite.values 3141 allowedError := expectedWrite.valueAllowedError 3142 3143 writesForNameAndAttrs, found := findWrites(allWrites, name, identTag, expectedWrite.attributes) 3144 require.True(t, found) 3145 require.Equal(t, len(expectedValues), len(writesForNameAndAttrs)) 3146 for i, expectedValue := range expectedValues { 3147 write := writesForNameAndAttrs[i] 3148 3149 assert.Equal(t, expectedWrite.tags, tagsToStringMap(write.Tags())) 3150 3151 require.Equal(t, 1, len(write.Datapoints())) 3152 3153 actualValue := write.Datapoints()[0].Value 3154 if allowedError == 0 { 3155 // Exact match value. 3156 assert.Equal(t, expectedValue.value, actualValue) 3157 } else { 3158 // Fuzzy match value. 3159 lower := expectedValue.value - allowedError 3160 upper := expectedValue.value + allowedError 3161 withinBounds := (lower <= actualValue) && (actualValue <= upper) 3162 msg := fmt.Sprintf("expected within: lower=%f, upper=%f, actual=%f", 3163 lower, upper, actualValue) 3164 assert.True(t, withinBounds, msg) 3165 } 3166 3167 if expectedOffset := expectedValue.offset; expectedOffset > 0 { 3168 // Check if distance between datapoints as expected (use 3169 // absolute offset from first write). 3170 firstTimestamp := writesForNameAndAttrs[0].Datapoints()[0].Timestamp 3171 actualOffset := write.Datapoints()[0].Timestamp.Sub(firstTimestamp) 3172 assert.Equal(t, expectedOffset, actualOffset) 3173 } 3174 3175 if attrs := expectedWrite.attributes; attrs != nil { 3176 assert.Equal(t, *attrs, write.Attributes()) 3177 } 3178 } 3179 } 3180 3181 if allowFilter == nil { 3182 return // No allow filter checking required. 3183 } 3184 3185 for _, write := range testDownsampler.storage.Writes() { 3186 attrs := write.Attributes() 3187 foundMatchingAttribute := false 3188 for _, allowed := range allowFilter.attributes { 3189 if allowed == attrs { 3190 foundMatchingAttribute = true 3191 break 3192 } 3193 } 3194 assert.True(t, foundMatchingAttribute, 3195 fmt.Sprintf("attribute not allowed: allowed=%v, actual=%v", 3196 allowFilter.attributes, attrs)) 3197 } 3198 } 3199 3200 func testDownsamplerRemoteAggregation( 3201 t *testing.T, 3202 testDownsampler testDownsampler, 3203 ) { 3204 testOpts := testDownsampler.testOpts 3205 3206 expectTestCounterMetrics, _ := testCounterMetrics(testCounterMetricsOptions{}) 3207 testCounterMetrics, _ := testCounterMetrics(testCounterMetricsOptions{}) 3208 3209 expectTestGaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{}) 3210 testGaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{}) 3211 3212 remoteClientMock := testOpts.remoteClientMock 3213 require.NotNil(t, remoteClientMock) 3214 3215 // Expect ingestion 3216 checkedCounterSamples := 0 3217 remoteClientMock.EXPECT(). 3218 WriteUntimedCounter(gomock.Any(), gomock.Any()). 3219 AnyTimes(). 3220 Do(func(counter unaggregated.Counter, 3221 metadatas metadata.StagedMetadatas, 3222 ) error { 3223 for _, c := range expectTestCounterMetrics { 3224 if !strings.Contains(counter.ID.String(), c.tags[nameTag]) { 3225 continue 3226 } 3227 3228 var remainingSamples []int64 3229 found := false 3230 for _, s := range c.samples { 3231 if !found && s == counter.Value { 3232 found = true 3233 } else { 3234 remainingSamples = append(remainingSamples, s) 3235 } 3236 } 3237 c.samples = remainingSamples 3238 if found { 3239 checkedCounterSamples++ 3240 } 3241 3242 break 3243 } 3244 3245 return nil 3246 }) 3247 3248 checkedGaugeSamples := 0 3249 remoteClientMock.EXPECT(). 3250 WriteUntimedGauge(gomock.Any(), gomock.Any()). 3251 AnyTimes(). 3252 Do(func(gauge unaggregated.Gauge, 3253 metadatas metadata.StagedMetadatas, 3254 ) error { 3255 for _, g := range expectTestGaugeMetrics { 3256 if !strings.Contains(gauge.ID.String(), g.tags[nameTag]) { 3257 continue 3258 } 3259 3260 var remainingSamples []float64 3261 found := false 3262 for _, s := range g.samples { 3263 if !found && s == gauge.Value { 3264 found = true 3265 } else { 3266 remainingSamples = append(remainingSamples, s) 3267 } 3268 } 3269 g.samples = remainingSamples 3270 if found { 3271 checkedGaugeSamples++ 3272 } 3273 3274 break 3275 } 3276 3277 return nil 3278 }) 3279 3280 // Ingest points 3281 testDownsamplerAggregationIngest(t, testDownsampler, 3282 testCounterMetrics, testGaugeMetrics, []testTimerMetric{}) 3283 3284 // Ensure we checked counters and gauges 3285 samplesCounters := 0 3286 for _, c := range testCounterMetrics { 3287 samplesCounters += len(c.samples) 3288 } 3289 samplesGauges := 0 3290 for _, c := range testGaugeMetrics { 3291 samplesGauges += len(c.samples) 3292 } 3293 require.Equal(t, samplesCounters, checkedCounterSamples) 3294 require.Equal(t, samplesGauges, checkedGaugeSamples) 3295 } 3296 3297 func testDownsamplerAggregationIngest( 3298 t *testing.T, 3299 testDownsampler testDownsampler, 3300 testCounterMetrics []testCounterMetric, 3301 testGaugeMetrics []testGaugeMetric, 3302 testTimerMetrics []testTimerMetric, 3303 ) { 3304 downsampler := testDownsampler.downsampler 3305 3306 testOpts := testDownsampler.testOpts 3307 3308 logger := testDownsampler.instrumentOpts.Logger(). 3309 With(zap.String("test", t.Name())) 3310 3311 logger.Info("write test metrics") 3312 appender, err := downsampler.NewMetricsAppender() 3313 require.NoError(t, err) 3314 defer appender.Finalize() 3315 3316 var opts SampleAppenderOptions 3317 if testOpts.sampleAppenderOpts != nil { 3318 opts = *testOpts.sampleAppenderOpts 3319 } 3320 // make the current timestamp predictable: 3321 now := time.Now().Truncate(time.Microsecond) 3322 xNow := xtime.ToUnixNano(now) 3323 for _, metric := range testCounterMetrics { 3324 appender.NextMetric() 3325 3326 for name, value := range metric.tags { 3327 appender.AddTag([]byte(name), []byte(value)) 3328 } 3329 3330 samplesAppenderResult, err := appender.SamplesAppender(opts) 3331 require.NoError(t, err) 3332 require.Equal(t, metric.expectDropPolicyApplied, 3333 samplesAppenderResult.IsDropPolicyApplied) 3334 require.Equal(t, metric.expectDropTimestamp, 3335 samplesAppenderResult.ShouldDropTimestamp) 3336 3337 samplesAppender := samplesAppenderResult.SamplesAppender 3338 for _, sample := range metric.samples { 3339 err = samplesAppender.AppendUntimedCounterSample(xtime.Now(), sample, nil) 3340 require.NoError(t, err) 3341 } 3342 for _, sample := range metric.timedSamples { 3343 if sample.time.IsZero() { 3344 sample.time = xNow // Allow empty time to mean "now" 3345 } 3346 if sample.offset > 0 { 3347 sample.time = sample.time.Add(sample.offset) 3348 } 3349 if testOpts.waitForOffset { 3350 time.Sleep(sample.offset) 3351 } 3352 if samplesAppenderResult.ShouldDropTimestamp { 3353 err = samplesAppender.AppendUntimedCounterSample(sample.time, sample.value, nil) 3354 } else { 3355 err = samplesAppender.AppendCounterSample(sample.time, sample.value, nil) 3356 } 3357 require.NoError(t, err) 3358 } 3359 } 3360 for _, metric := range testGaugeMetrics { 3361 appender.NextMetric() 3362 3363 for name, value := range metric.tags { 3364 appender.AddTag([]byte(name), []byte(value)) 3365 } 3366 3367 samplesAppenderResult, err := appender.SamplesAppender(opts) 3368 require.NoError(t, err) 3369 require.Equal(t, metric.expectDropPolicyApplied, 3370 samplesAppenderResult.IsDropPolicyApplied) 3371 require.Equal(t, metric.expectDropTimestamp, 3372 samplesAppenderResult.ShouldDropTimestamp) 3373 3374 samplesAppender := samplesAppenderResult.SamplesAppender 3375 for _, sample := range metric.samples { 3376 err = samplesAppender.AppendUntimedGaugeSample(xtime.Now(), sample, nil) 3377 require.NoError(t, err) 3378 } 3379 for _, sample := range metric.timedSamples { 3380 if sample.time.IsZero() { 3381 sample.time = xNow // Allow empty time to mean "now" 3382 } 3383 if sample.offset > 0 { 3384 sample.time = sample.time.Add(sample.offset) 3385 } 3386 if testOpts.waitForOffset { 3387 time.Sleep(sample.offset) 3388 } 3389 if samplesAppenderResult.ShouldDropTimestamp { 3390 err = samplesAppender.AppendUntimedGaugeSample(sample.time, sample.value, nil) 3391 } else { 3392 err = samplesAppender.AppendGaugeSample(sample.time, sample.value, nil) 3393 } 3394 require.NoError(t, err) 3395 } 3396 } 3397 3398 //nolint:dupl 3399 for _, metric := range testTimerMetrics { 3400 appender.NextMetric() 3401 3402 for name, value := range metric.tags { 3403 appender.AddTag([]byte(name), []byte(value)) 3404 } 3405 3406 samplesAppenderResult, err := appender.SamplesAppender(opts) 3407 require.NoError(t, err) 3408 require.Equal(t, metric.expectDropPolicyApplied, 3409 samplesAppenderResult.IsDropPolicyApplied) 3410 require.Equal(t, metric.expectDropTimestamp, 3411 samplesAppenderResult.ShouldDropTimestamp) 3412 3413 samplesAppender := samplesAppenderResult.SamplesAppender 3414 for _, sample := range metric.samples { 3415 err = samplesAppender.AppendUntimedTimerSample(xtime.Now(), sample, nil) 3416 require.NoError(t, err) 3417 } 3418 for _, sample := range metric.timedSamples { 3419 if sample.time.IsZero() { 3420 sample.time = xtime.ToUnixNano(now) // Allow empty time to mean "now" 3421 } 3422 if sample.offset > 0 { 3423 sample.time = sample.time.Add(sample.offset) 3424 } 3425 if testOpts.waitForOffset { 3426 time.Sleep(sample.offset) 3427 } 3428 if samplesAppenderResult.ShouldDropTimestamp { 3429 err = samplesAppender.AppendUntimedTimerSample(sample.time, sample.value, nil) 3430 } else { 3431 err = samplesAppender.AppendTimerSample(sample.time, sample.value, nil) 3432 } 3433 require.NoError(t, err) 3434 } 3435 } 3436 } 3437 3438 func setAggregatedNamespaces( 3439 t *testing.T, 3440 testDownsampler testDownsampler, 3441 session dbclient.Session, 3442 namespaces ...m3.AggregatedClusterNamespaceDefinition, 3443 ) { 3444 clusters, err := m3.NewClusters(m3.UnaggregatedClusterNamespaceDefinition{ 3445 NamespaceID: ident.StringID("default"), 3446 Retention: 48 * time.Hour, 3447 Session: session, 3448 }, namespaces...) 3449 require.NoError(t, err) 3450 require.NoError(t, testDownsampler.opts.ClusterNamespacesWatcher.Update(clusters.ClusterNamespaces())) 3451 } 3452 3453 func tagsToStringMap(tags models.Tags) map[string]string { 3454 stringMap := make(map[string]string, tags.Len()) 3455 for _, t := range tags.Tags { 3456 stringMap[string(t.Name)] = string(t.Value) 3457 } 3458 3459 return stringMap 3460 } 3461 3462 type testDownsampler struct { 3463 opts DownsamplerOptions 3464 testOpts testDownsamplerOptions 3465 downsampler Downsampler 3466 storage mock.Storage 3467 rulesStore rules.Store 3468 instrumentOpts instrument.Options 3469 } 3470 3471 type testDownsamplerOptions struct { 3472 clockOpts clock.Options 3473 instrumentOpts instrument.Options 3474 identTag string 3475 untimedRollups bool 3476 waitForOffset bool 3477 3478 // Options for the test 3479 autoMappingRules []m3.ClusterNamespaceOptions 3480 sampleAppenderOpts *SampleAppenderOptions 3481 remoteClientMock *client.MockClient 3482 rulesConfig *RulesConfiguration 3483 matcherConfig MatcherConfiguration 3484 3485 // Test ingest and expectations overrides 3486 ingest *testDownsamplerOptionsIngest 3487 expect *testDownsamplerOptionsExpect 3488 3489 kvStore kv.Store 3490 } 3491 3492 type testDownsamplerOptionsIngest struct { 3493 counterMetrics []testCounterMetric 3494 gaugeMetrics []testGaugeMetric 3495 timerMetrics []testTimerMetric 3496 } 3497 3498 type testDownsamplerOptionsExpect struct { 3499 writes []testExpectedWrite 3500 allowFilter *testDownsamplerOptionsExpectAllowFilter 3501 } 3502 3503 type testDownsamplerOptionsExpectAllowFilter struct { 3504 attributes []storagemetadata.Attributes 3505 } 3506 3507 func newTestDownsampler(t *testing.T, opts testDownsamplerOptions) testDownsampler { 3508 if opts.expect == nil { 3509 opts.expect = &testDownsamplerOptionsExpect{} 3510 } 3511 storage := mock.NewMockStorage() 3512 rulesKVStore := mem.NewStore() 3513 3514 clockOpts := clock.NewOptions() 3515 if opts.clockOpts != nil { 3516 clockOpts = opts.clockOpts 3517 } 3518 3519 // Use a test instrument options by default to get the debug logs on by default. 3520 instrumentOpts := instrument.NewTestOptions(t) 3521 if opts.instrumentOpts != nil { 3522 instrumentOpts = opts.instrumentOpts 3523 } 3524 3525 matcherOpts := matcher.NewOptions() 3526 3527 // Initialize the namespaces 3528 _, err := rulesKVStore.Set(matcherOpts.NamespacesKey(), &rulepb.Namespaces{}) 3529 require.NoError(t, err) 3530 3531 rulesetKeyFmt := matcherOpts.RuleSetKeyFn()([]byte("%s")) 3532 rulesStoreOpts := ruleskv.NewStoreOptions(matcherOpts.NamespacesKey(), 3533 rulesetKeyFmt, nil) 3534 rulesStore := ruleskv.NewStore(rulesKVStore, rulesStoreOpts) 3535 3536 tagEncoderOptions := serialize.NewTagEncoderOptions() 3537 tagDecoderOptions := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}) 3538 tagEncoderPoolOptions := pool.NewObjectPoolOptions(). 3539 SetSize(2). 3540 SetInstrumentOptions(instrumentOpts. 3541 SetMetricsScope(instrumentOpts.MetricsScope(). 3542 SubScope("tag-encoder-pool"))) 3543 tagDecoderPoolOptions := pool.NewObjectPoolOptions(). 3544 SetSize(2). 3545 SetInstrumentOptions(instrumentOpts. 3546 SetMetricsScope(instrumentOpts.MetricsScope(). 3547 SubScope("tag-decoder-pool"))) 3548 metricsAppenderPoolOptions := pool.NewObjectPoolOptions(). 3549 SetSize(2). 3550 SetInstrumentOptions(instrumentOpts. 3551 SetMetricsScope(instrumentOpts.MetricsScope(). 3552 SubScope("metrics-appender-pool"))) 3553 3554 cfg := Configuration{ 3555 BufferPastLimits: []BufferPastLimitConfiguration{ 3556 {Resolution: 0, BufferPast: 500 * time.Millisecond}, 3557 }, 3558 } 3559 if opts.remoteClientMock != nil { 3560 // Optionally set an override to use remote aggregation 3561 // with a mock client 3562 cfg.RemoteAggregator = &RemoteAggregatorConfiguration{ 3563 clientOverride: opts.remoteClientMock, 3564 } 3565 } 3566 if opts.rulesConfig != nil { 3567 cfg.Rules = opts.rulesConfig 3568 } 3569 cfg.Matcher = opts.matcherConfig 3570 cfg.UntimedRollups = opts.untimedRollups 3571 3572 clusterClient := clusterclient.NewMockClient(gomock.NewController(t)) 3573 kvStore := opts.kvStore 3574 if kvStore == nil { 3575 kvStore = mem.NewStore() 3576 } 3577 clusterClient.EXPECT().KV().Return(kvStore, nil).AnyTimes() 3578 instance, err := cfg.NewDownsampler(DownsamplerOptions{ 3579 Storage: storage, 3580 ClusterClient: clusterClient, 3581 RulesKVStore: rulesKVStore, 3582 ClusterNamespacesWatcher: m3.NewClusterNamespacesWatcher(), 3583 ClockOptions: clockOpts, 3584 InstrumentOptions: instrumentOpts, 3585 TagEncoderOptions: tagEncoderOptions, 3586 TagDecoderOptions: tagDecoderOptions, 3587 TagEncoderPoolOptions: tagEncoderPoolOptions, 3588 TagDecoderPoolOptions: tagDecoderPoolOptions, 3589 MetricsAppenderPoolOptions: metricsAppenderPoolOptions, 3590 RWOptions: xio.NewOptions(), 3591 TagOptions: models.NewTagOptions(), 3592 }) 3593 require.NoError(t, err) 3594 3595 if len(opts.autoMappingRules) > 0 { 3596 // Simulate the automapping rules being injected into the downsampler. 3597 ctrl := gomock.NewController(t) 3598 3599 var mockNamespaces m3.ClusterNamespaces 3600 for _, r := range opts.autoMappingRules { 3601 n := m3.NewMockClusterNamespace(ctrl) 3602 n.EXPECT(). 3603 Options(). 3604 Return(r). 3605 AnyTimes() 3606 mockNamespaces = append(mockNamespaces, n) 3607 } 3608 3609 instance.(*downsampler).OnUpdate(mockNamespaces) 3610 } 3611 3612 downcast, ok := instance.(*downsampler) 3613 require.True(t, ok) 3614 3615 return testDownsampler{ 3616 opts: downcast.opts, 3617 testOpts: opts, 3618 downsampler: instance, 3619 storage: storage, 3620 rulesStore: rulesStore, 3621 instrumentOpts: instrumentOpts, 3622 } 3623 } 3624 3625 func newTestID(t *testing.T, tags map[string]string) id.ID { 3626 tagEncoderPool := serialize.NewTagEncoderPool(serialize.NewTagEncoderOptions(), 3627 pool.NewObjectPoolOptions().SetSize(1)) 3628 tagEncoderPool.Init() 3629 3630 tagsIter := newTags() 3631 for name, value := range tags { 3632 tagsIter.append([]byte(name), []byte(value)) 3633 } 3634 3635 tagEncoder := tagEncoderPool.Get() 3636 err := tagEncoder.Encode(tagsIter) 3637 require.NoError(t, err) 3638 3639 data, ok := tagEncoder.Data() 3640 require.True(t, ok) 3641 3642 size := 1 3643 tagDecoderPool := serialize.NewTagDecoderPool( 3644 serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{ 3645 CheckBytesWrapperPoolSize: &size, 3646 }), 3647 pool.NewObjectPoolOptions().SetSize(size)) 3648 tagDecoderPool.Init() 3649 3650 tagDecoder := tagDecoderPool.Get() 3651 3652 iter := serialize.NewMetricTagsIterator(tagDecoder, nil) 3653 iter.Reset(data.Bytes()) 3654 return iter 3655 } 3656 3657 func findWrites( 3658 writes []*storage.WriteQuery, 3659 name, identTag string, 3660 optionalMatchAttrs *storagemetadata.Attributes, 3661 ) ([]*storage.WriteQuery, bool) { 3662 var results []*storage.WriteQuery 3663 for _, w := range writes { 3664 if t, ok := w.Tags().Get([]byte(identTag)); ok { 3665 if !bytes.Equal(t, []byte(name)) { 3666 // Does not match name. 3667 continue 3668 } 3669 if optionalMatchAttrs != nil && w.Attributes() != *optionalMatchAttrs { 3670 // Tried to match attributes and not matched. 3671 continue 3672 } 3673 3674 // Matches name and all optional lookups. 3675 results = append(results, w) 3676 } 3677 } 3678 return results, len(results) > 0 3679 } 3680 3681 func testUpdateMetadata() rules.UpdateMetadata { 3682 return rules.NewRuleSetUpdateHelper(0).NewUpdateMetadata(time.Now().UnixNano(), "test") 3683 } 3684 3685 func readNamespacesKey(t *testing.T, store kv.Store) rulepb.Namespaces { 3686 v, err := store.Get(matcher.NewOptions().NamespacesKey()) 3687 require.NoError(t, err) 3688 var ns rulepb.Namespaces 3689 err = v.Unmarshal(&ns) 3690 require.NoError(t, err) 3691 require.NotNil(t, ns) 3692 return ns 3693 }