github.com/thanos-io/thanos@v0.32.5/pkg/receive/handler_test.go

// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.

package receive

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math"
	"net/http"
	"net/http/httptest"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"runtime/pprof"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"google.golang.org/grpc"
	"gopkg.in/yaml.v3"

	"github.com/alecthomas/units"
	"github.com/go-kit/log"
	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	prometheusMetadata "github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"

	"github.com/efficientgo/core/testutil"

	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/extkingpin"
	"github.com/thanos-io/thanos/pkg/runutil"
	"github.com/thanos-io/thanos/pkg/store/labelpb"
	"github.com/thanos-io/thanos/pkg/store/storepb"
	"github.com/thanos-io/thanos/pkg/store/storepb/prompb"
	"github.com/thanos-io/thanos/pkg/tenancy"
)

// fakeTenantAppendable is a TenantStorage stub that returns the same
// fakeAppendable for every tenant.
type fakeTenantAppendable struct {
	f *fakeAppendable
}

func newFakeTenantAppendable(f *fakeAppendable) *fakeTenantAppendable {
	return &fakeTenantAppendable{f: f}
}

func (t *fakeTenantAppendable) TenantAppendable(_ string) (Appendable, error) {
	return t.f, nil
}

type fakeAppendable struct {
	appender    storage.Appender
	appenderErr func() error
}

var _ Appendable = &fakeAppendable{}

func nilErrFn() error {
	return nil
}

func (f *fakeAppendable) Appender(_ context.Context) (storage.Appender, error) {
	errf := f.appenderErr
	if errf == nil {
		errf = nilErrFn
	}
	return f.appender, errf()
}

// fakeAppender records appended samples and exemplars in memory, keyed by
// series reference, and fails with the configured errors when they are set.
type fakeAppender struct {
	sync.Mutex
	samples     map[storage.SeriesRef][]prompb.Sample
	exemplars   map[storage.SeriesRef][]exemplar.Exemplar
	appendErr   func() error
	commitErr   func() error
	rollbackErr func() error
}

var _ storage.Appender = &fakeAppender{}
var _ storage.GetRef = &fakeAppender{}

func newFakeAppender(appendErr, commitErr, rollbackErr func() error) *fakeAppender { //nolint:unparam
	if appendErr == nil {
		appendErr = nilErrFn
	}
	if commitErr == nil {
		commitErr = nilErrFn
	}
	if rollbackErr == nil {
		rollbackErr = nilErrFn
	}
	return &fakeAppender{
		samples: make(map[storage.SeriesRef][]prompb.Sample),
		// Initialize the exemplars map as well; without it, AppendExemplar
		// would panic on assignment to a nil map.
		exemplars:   make(map[storage.SeriesRef][]exemplar.Exemplar),
		appendErr:   appendErr,
		commitErr:   commitErr,
		rollbackErr: rollbackErr,
	}
}

func (f *fakeAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, prometheusMetadata.Metadata) (storage.SeriesRef, error) {
	return 0, nil
}

func (f *fakeAppender) Get(l labels.Labels) []prompb.Sample {
	f.Lock()
	defer f.Unlock()
	s := f.samples[storage.SeriesRef(l.Hash())]
	res := make([]prompb.Sample, len(s))
	copy(res, s)
	return res
}

func (f *fakeAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	f.Lock()
	defer f.Unlock()
	if ref == 0 {
		ref = storage.SeriesRef(l.Hash())
	}
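	// Record the sample under the resolved ref; Get later looks the series up
	// by its label hash, which matches the ref derived above.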
	f.samples[ref] = append(f.samples[ref], prompb.Sample{Timestamp: t, Value: v})
	return ref, f.appendErr()
}

func (f *fakeAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
	f.Lock()
	defer f.Unlock()
	if ref == 0 {
		ref = storage.SeriesRef(l.Hash())
	}
	f.exemplars[ref] = append(f.exemplars[ref], e)
	return ref, f.appendErr()
}

// TODO(rabenhorst): Needs to be implemented for native histogram support.
func (f *fakeAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	panic("not implemented")
}

func (f *fakeAppender) GetRef(l labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
	return storage.SeriesRef(hash), l
}

func (f *fakeAppender) Commit() error {
	return f.commitErr()
}

func (f *fakeAppender) Rollback() error {
	return f.rollbackErr()
}

// newTestHandlerHashring creates one Handler per fake appendable, wires them
// all into a shared hashring, and returns the handlers together with the
// hashring.
func newTestHandlerHashring(appendables []*fakeAppendable, replicationFactor uint64, hashringAlgo HashringAlgorithm) ([]*Handler, Hashring, error) {
	var (
		cfg      = []HashringConfig{{Hashring: "test"}}
		handlers []*Handler
		wOpts    = &WriterOptions{}
	)
	// Create a fake peer group where we manually fill the cache with fake
	// addresses pointing to our handlers. This removes the network from the
	// tests and creates a more consistent testing harness.
	peers := &peerGroup{
		dialOpts: nil,
		m:        sync.RWMutex{},
		cache:    map[string]storepb.WriteableStoreClient{},
		dialer: func(context.Context, string, ...grpc.DialOption) (*grpc.ClientConn, error) {
			// The dialer should never be called, since we create fake clients
			// with fake addresses. This protects against a leaking test that
			// may attempt to dial random IP addresses, which could pose a
			// security risk.
			return nil, errors.New("unexpected dial called in testing")
		},
	}

	ag := addrGen{}
	limiter, _ := NewLimiter(NewNopConfig(), nil, RouterIngestor, log.NewNopLogger(), 1*time.Second)
	for i := range appendables {
		h := NewHandler(nil, &Options{
			TenantHeader:      tenancy.DefaultTenantHeader,
			ReplicaHeader:     DefaultReplicaHeader,
			ReplicationFactor: replicationFactor,
			ForwardTimeout:    5 * time.Minute,
			Writer:            NewWriter(log.NewNopLogger(), newFakeTenantAppendable(appendables[i]), wOpts),
			Limiter:           limiter,
		})
		handlers = append(handlers, h)
		h.peers = peers
		addr := ag.newAddr()
		h.options.Endpoint = addr
		cfg[0].Endpoints = append(cfg[0].Endpoints, Endpoint{Address: h.options.Endpoint})
		peers.cache[addr] = &fakeRemoteWriteGRPCServer{h: h}
	}
	// Use hashmod as the default.
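	// AlgorithmHashmod places each series by hashing its labels modulo the
	// number of endpoints, so placement is deterministic for a fixed endpoint
	// list; AlgorithmKetama (exercised by the Ketama test variants below) uses
	// consistent hashing instead.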
	if hashringAlgo == "" {
		hashringAlgo = AlgorithmHashmod
	}

	hashring, err := NewMultiHashring(hashringAlgo, replicationFactor, cfg)
	if err != nil {
		return nil, nil, err
	}
	for _, h := range handlers {
		h.Hashring(hashring)
	}
	return handlers, hashring, nil
}

func testReceiveQuorum(t *testing.T, hashringAlgo HashringAlgorithm, withConsistencyDelay bool) {
	appenderErrFn := func() error { return errors.New("failed to get appender") }
	conflictErrFn := func() error { return storage.ErrOutOfBounds }
	tooOldSampleErrFn := func() error { return storage.ErrTooOldSample }
	commitErrFn := func() error { return errors.New("failed to commit") }
	wreq := &prompb.WriteRequest{
		Timeseries: makeSeriesWithValues(50),
	}

	for _, tc := range []struct {
		name              string
		status            int
		replicationFactor uint64
		wreq              *prompb.WriteRequest
		appendables       []*fakeAppendable
	}{
		{
			name:              "size 1 success",
			status:            http.StatusOK,
			replicationFactor: 1,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 1 commit error",
			status:            http.StatusInternalServerError,
			replicationFactor: 1,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
			},
		},
		{
			name:              "size 1 conflict",
			status:            http.StatusConflict,
			replicationFactor: 1,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(conflictErrFn, nil, nil),
				},
			},
		},
		{
			name:              "size 2 success",
			status:            http.StatusOK,
			replicationFactor: 1,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 3 success",
			status:            http.StatusOK,
			replicationFactor: 1,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 3 success with replication",
			status:            http.StatusOK,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 3 commit error",
			status:            http.StatusInternalServerError,
			replicationFactor: 1,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
			},
		},
		{
			name:              "size 3 commit error with replication",
			status:            http.StatusInternalServerError,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
			},
		},
		{
			name:              "size 3 appender error with replication",
			status:            http.StatusInternalServerError,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender:    newFakeAppender(nil, nil, nil),
					appenderErr: appenderErrFn,
				},
				{
					appender:    newFakeAppender(nil, nil, nil),
					appenderErr: appenderErrFn,
				},
				{
					appender:    newFakeAppender(nil, nil, nil),
					appenderErr: appenderErrFn,
				},
			},
		},
		{
			name:              "size 3 conflict with replication",
			status:            http.StatusConflict,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(conflictErrFn, nil, nil),
				},
				{
					appender: newFakeAppender(conflictErrFn, nil, nil),
				},
				{
					appender: newFakeAppender(conflictErrFn, nil, nil),
				},
			},
		},
		{
			name:              "size 3 conflict and commit error with replication",
			status:            http.StatusConflict,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(conflictErrFn, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(conflictErrFn, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(conflictErrFn, commitErrFn, nil),
				},
			},
		},
		{
			name:              "size 3 conflict with replication and error is ErrTooOldSample",
			status:            http.StatusConflict,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(tooOldSampleErrFn, nil, nil),
				},
				{
					appender: newFakeAppender(tooOldSampleErrFn, nil, nil),
				},
				{
					appender: newFakeAppender(tooOldSampleErrFn, nil, nil),
				},
			},
		},
		{
			name:              "size 3 with replication and one faulty",
			status:            http.StatusOK,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(cycleErrors([]error{storage.ErrOutOfBounds, storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp}), nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 3 with replication and one commit error",
			status:            http.StatusOK,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 3 with replication and two conflicts",
			status:            http.StatusConflict,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(cycleErrors([]error{storage.ErrOutOfBounds, storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp}), nil, nil),
				},
				{
					appender: newFakeAppender(conflictErrFn, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 3 with replication one conflict and one commit error",
			status:            http.StatusInternalServerError,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(cycleErrors([]error{storage.ErrOutOfBounds, storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp}), nil, nil),
				},
			},
		},
		{
			name:              "size 3 with replication two commit errors",
			status:            http.StatusInternalServerError,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 6 with replication 3",
			status:            http.StatusOK,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 6 with replication 3 one commit and two conflict error",
			status:            http.StatusConflict,
			replicationFactor: 3,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, conflictErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, conflictErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
		{
			name:              "size 6 with replication 5 two commit errors",
			status:            http.StatusOK,
			replicationFactor: 5,
			wreq:              wreq,
			appendables: []*fakeAppendable{
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, commitErrFn, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			handlers, hashring, err := newTestHandlerHashring(tc.appendables, tc.replicationFactor, hashringAlgo)
			if err != nil {
				t.Fatalf("unable to create test handler: %v", err)
			}
			tenant := "test"
			// Test from the point of view of every node,
			// so that we know the status code does not depend
			// on which node is erroring and which node is receiving.
			for i, handler := range handlers {
				// Test that the correct status is returned.
				rec, err := makeRequest(handler, tenant, tc.wreq)
				if err != nil {
					t.Fatalf("handler %d: unexpectedly failed making HTTP request: %v", i+1, err)
				}
				if rec.Code != tc.status {
					t.Errorf("handler %d: got unexpected HTTP status code: expected %d, got %d; body: %s", i+1, tc.status, rec.Code, rec.Body.String())
				}
			}

			if withConsistencyDelay {
				time.Sleep(50 * time.Millisecond)
			}

			// Test that each time series is stored
			// the correct number of times in each fake DB.
			for _, ts := range tc.wreq.Timeseries {
				lset := make(labels.Labels, len(ts.Labels))
				for j := range ts.Labels {
					lset[j] = labels.Label{
						Name:  ts.Labels[j].Name,
						Value: ts.Labels[j].Value,
					}
				}
				for j, a := range tc.appendables {
					if withConsistencyDelay {
						var expected int
						n := a.appender.(*fakeAppender).Get(lset)
						got := uint64(len(n))
						if a.appenderErr == nil && endpointHit(t, hashring, tc.replicationFactor, handlers[j].options.Endpoint, tenant, &ts) {
							// We have len(handlers) copies of each sample because the test case
							// is run once for each handler and they all use the same appender.
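							// For example, with 3 handlers and 1 sample per series, a node that
							// owns a series should hold exactly 3 copies of its sample.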
							expected = len(handlers) * len(ts.Samples)
						}
						if uint64(expected) != got {
							t.Errorf("handler: %d, labels %q: expected %d samples, got %d", j, lset.String(), expected, got)
						}
					} else {
						var expectedMin int
						n := a.appender.(*fakeAppender).Get(lset)
						got := uint64(len(n))
						if a.appenderErr == nil && endpointHit(t, hashring, tc.replicationFactor, handlers[j].options.Endpoint, tenant, &ts) {
							// We have len(handlers) copies of each sample because the test case
							// is run once for each handler and they all use the same appender.
							// Without the consistency delay, only the write quorum is guaranteed:
							// floor(replicationFactor/2)+1 acknowledged copies per request.
							expectedMin = int((tc.replicationFactor/2)+1) * len(ts.Samples)
						}
						if uint64(expectedMin) > got {
							t.Errorf("handler: %d, labels %q: expected minimum of %d samples, got %d", j, lset.String(), expectedMin, got)
						}
					}
				}
			}
		})
	}
}

func TestReceiveQuorumHashmod(t *testing.T) {
	testReceiveQuorum(t, AlgorithmHashmod, false)
}

func TestReceiveQuorumKetama(t *testing.T) {
	testReceiveQuorum(t, AlgorithmKetama, false)
}

func TestReceiveWithConsistencyDelayHashmod(t *testing.T) {
	testReceiveQuorum(t, AlgorithmHashmod, true)
}

func TestReceiveWithConsistencyDelayKetama(t *testing.T) {
	testReceiveQuorum(t, AlgorithmKetama, true)
}

func TestReceiveWriteRequestLimits(t *testing.T) {
	for _, tc := range []struct {
		name          string
		status        int
		amountSeries  int
		amountSamples int
	}{
		{
			name:         "Request above limit of series",
			status:       http.StatusRequestEntityTooLarge,
			amountSeries: 21,
		},
		{
			name:         "Request under the limit of series",
			status:       http.StatusOK,
			amountSeries: 20,
		},
		{
			name:          "Request above limit of samples (series * samples)",
			status:        http.StatusRequestEntityTooLarge,
			amountSeries:  30,
			amountSamples: 15,
		},
		{
			name:          "Request under the limit of samples (series * samples)",
			status:        http.StatusOK,
			amountSeries:  10,
			amountSamples: 2,
		},
		{
			name:          "Request above body size limit",
			status:        http.StatusRequestEntityTooLarge,
			amountSeries:  300,
			amountSamples: 150,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if tc.amountSamples == 0 {
				tc.amountSamples = 1
			}

			appendables := []*fakeAppendable{
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
				{
					appender: newFakeAppender(nil, nil, nil),
				},
			}
			handlers, _, err := newTestHandlerHashring(appendables, 3, AlgorithmHashmod)
			if err != nil {
				t.Fatalf("unable to create test handler: %v", err)
			}
			handler := handlers[0]

			tenant := "test"
			tenantConfig, err := yaml.Marshal(&RootLimitsConfig{
				WriteLimits: WriteLimitsConfig{
					TenantsLimits: TenantsWriteLimitsConfig{
						tenant: &WriteLimitConfig{
							RequestLimits: NewEmptyRequestLimitsConfig().
								SetSizeBytesLimit(int64(1 * units.Megabyte)).
								SetSeriesLimit(20).
								SetSamplesLimit(200),
						},
					},
				},
			})
			if err != nil {
				t.Fatal("handler: failed to generate limit configuration")
			}
			tmpLimitsPath := path.Join(t.TempDir(), "limits.yaml")
			testutil.Ok(t, os.WriteFile(tmpLimitsPath, tenantConfig, 0666))
			limitConfig, _ := extkingpin.NewStaticPathContent(tmpLimitsPath)
			handler.Limiter, _ = NewLimiter(
				limitConfig, nil, RouterIngestor, log.NewNopLogger(), 1*time.Second,
			)

			wreq := &prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{},
			}

			for i := 0; i < tc.amountSeries; i++ {
				label := labelpb.ZLabel{Name: "foo", Value: "bar"}
				series := prompb.TimeSeries{
					Labels: []labelpb.ZLabel{label},
				}
				for j := 0; j < tc.amountSamples; j++ {
					sample := prompb.Sample{Value: float64(j), Timestamp: int64(j)}
					series.Samples = append(series.Samples, sample)
				}
				wreq.Timeseries = append(wreq.Timeseries, series)
			}

			// Test that the correct status is returned.
			rec, err := makeRequest(handler, tenant, wreq)
			if err != nil {
				t.Fatalf("handler: unexpectedly failed making HTTP request: %v", err)
			}
			if rec.Code != tc.status {
				t.Errorf("handler: got unexpected HTTP status code: expected %d, got %d; body: %s", tc.status, rec.Code, rec.Body.String())
			}
		})
	}
}

// endpointHit is a helper to determine if a given endpoint in a hashring would be selected
// for a given time series, tenant, and replication factor.
func endpointHit(t *testing.T, h Hashring, rf uint64, endpoint, tenant string, timeSeries *prompb.TimeSeries) bool {
	for i := uint64(0); i < rf; i++ {
		e, err := h.GetN(tenant, timeSeries, i)
		if err != nil {
			t.Fatalf("got unexpected error querying hashring: %v", err)
		}
		if e == endpoint {
			return true
		}
	}
	return false
}

// cycleErrors returns an error generator that cycles through every given error.
func cycleErrors(errs []error) func() error {
	var mu sync.Mutex
	var i int
	return func() error {
		mu.Lock()
		defer mu.Unlock()
		err := errs[i]
		i++
		if i >= len(errs) {
			i = 0
		}
		return err
	}
}

// makeRequest is a helper that issues a correctly encoded remote-write request
// to the given handler on behalf of the given tenant.
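// The body is the Prometheus remote-write wire format: a snappy-compressed,
// protobuf-encoded WriteRequest.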
func makeRequest(h *Handler, tenant string, wreq *prompb.WriteRequest) (*httptest.ResponseRecorder, error) {
	buf, err := proto.Marshal(wreq)
	if err != nil {
		return nil, errors.Wrap(err, "marshal request")
	}
	req, err := http.NewRequest("POST", h.options.Endpoint, bytes.NewBuffer(snappy.Encode(nil, buf)))
	if err != nil {
		return nil, errors.Wrap(err, "create request")
	}
	req.Header.Add(h.options.TenantHeader, tenant)

	rec := httptest.NewRecorder()
	h.receiveHTTP(rec, req)
	rec.Flush()

	return rec, nil
}

type addrGen struct{ n int }

func (a *addrGen) newAddr() string {
	a.n++
	return fmt.Sprintf("http://node-%d:%d", a.n, 12345+a.n)
}

type fakeRemoteWriteGRPCServer struct {
	h storepb.WriteableStoreServer
}

func (f *fakeRemoteWriteGRPCServer) RemoteWrite(ctx context.Context, in *storepb.WriteRequest, opts ...grpc.CallOption) (*storepb.WriteResponse, error) {
	return f.h.RemoteWrite(ctx, in)
}

func BenchmarkHandlerReceiveHTTP(b *testing.B) {
	benchmarkHandlerMultiTSDBReceiveRemoteWrite(testutil.NewTB(b))
}

func TestHandlerReceiveHTTP(t *testing.T) {
	benchmarkHandlerMultiTSDBReceiveRemoteWrite(testutil.NewTB(t))
}

// tsOverrideTenantStorage is a storage wrapper that overrides timestamps so
// that samples arrive at a consistent interval.
type tsOverrideTenantStorage struct {
	TenantStorage

	interval int64
}

func (s *tsOverrideTenantStorage) TenantAppendable(tenant string) (Appendable, error) {
	a, err := s.TenantStorage.TenantAppendable(tenant)
	return &tsOverrideAppendable{Appendable: a, interval: s.interval}, err
}

type tsOverrideAppendable struct {
	Appendable

	interval int64
}

func (a *tsOverrideAppendable) Appender(ctx context.Context) (storage.Appender, error) {
	ret, err := a.Appendable.Appender(ctx)
	return &tsOverrideAppender{Appender: ret, interval: a.interval}, err
}

type tsOverrideAppender struct {
	storage.Appender

	interval int64
}

// cnt is shared package state: every tsOverrideAppender advances the same
// counter, so timestamps keep increasing across appenders and requests.
var cnt int64

func (a *tsOverrideAppender) Append(ref storage.SeriesRef, l labels.Labels, _ int64, v float64) (storage.SeriesRef, error) {
	cnt += a.interval
	return a.Appender.Append(ref, l, cnt, v)
}

func (a *tsOverrideAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
	return a.Appender.(storage.GetRef).GetRef(lset, hash)
}

// serializeSeriesWithOneSample returns a marshaled and compressed remote-write
// request like the ones sent to Thanos receive.
// Each series carries one sample, and multiple series can be passed, in the
// same manner as a typical Prometheus would batch them.
func serializeSeriesWithOneSample(t testing.TB, series [][]labelpb.ZLabel) []byte {
	r := &prompb.WriteRequest{Timeseries: make([]prompb.TimeSeries, 0, len(series))}

	for _, s := range series {
		r.Timeseries = append(r.Timeseries, prompb.TimeSeries{
			Labels: s,
			// The timestamp does not matter; it will be overridden.
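			// (In the benchmark, tsOverrideAppender rewrites every timestamp
			// to a monotonically increasing counter.)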
			Samples: []prompb.Sample{{Value: math.MaxFloat64, Timestamp: math.MinInt64}},
		})
	}
	body, err := proto.Marshal(r)
	testutil.Ok(t, err)
	return snappy.Encode(nil, body)
}

func makeSeriesWithValues(numSeries int) []prompb.TimeSeries {
	series := make([]prompb.TimeSeries, numSeries)
	for i := 0; i < numSeries; i++ {
		series[i] = prompb.TimeSeries{
			Labels: []labelpb.ZLabel{
				{
					Name:  fmt.Sprintf("pod-%d", i),
					Value: fmt.Sprintf("nginx-%d", i),
				},
			},
			Samples: []prompb.Sample{
				{
					Value:     float64(i),
					Timestamp: 10,
				},
			},
		}
	}
	return series
}

func benchmarkHandlerMultiTSDBReceiveRemoteWrite(b testutil.TB) {
	dir := b.TempDir()

	handlers, _, err := newTestHandlerHashring([]*fakeAppendable{nil}, 1, AlgorithmHashmod)
	if err != nil {
		b.Fatalf("unable to create test handler: %v", err)
	}
	handler := handlers[0]

	reg := prometheus.NewRegistry()

	logger := log.NewNopLogger()
	m := NewMultiTSDB(
		dir, logger, reg, &tsdb.Options{
			MinBlockDuration:  int64(2 * time.Hour / time.Millisecond),
			MaxBlockDuration:  int64(2 * time.Hour / time.Millisecond),
			RetentionDuration: int64(6 * time.Hour / time.Millisecond),
			NoLockfile:        true,
			StripeSize:        1, // Disable stripe pre-allocation so we can have clear profiles.
		},
		labels.FromStrings("replica", "01"),
		"tenant_id",
		nil,
		false,
		metadata.NoneFunc,
	)
	defer func() { testutil.Ok(b, m.Close()) }()
	handler.writer = NewWriter(logger, m, &WriterOptions{})

	testutil.Ok(b, m.Flush())
	testutil.Ok(b, m.Open())

	for _, tcase := range []struct {
		name         string
		writeRequest []byte
	}{
		{
			name: "typical labels under 1KB, 500 of them",
			writeRequest: serializeSeriesWithOneSample(b, func() [][]labelpb.ZLabel {
				series := make([][]labelpb.ZLabel, 500)
				for s := 0; s < len(series); s++ {
					lbls := make([]labelpb.ZLabel, 10)
					for i := 0; i < len(lbls); i++ {
						// Label with a ~30B name and a ~50B value.
						lbls[i] = labelpb.ZLabel{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
					}
					series[s] = lbls
				}
				return series
			}()),
		},
		{
			name: "typical labels under 1KB, 5000 of them",
			writeRequest: serializeSeriesWithOneSample(b, func() [][]labelpb.ZLabel {
				series := make([][]labelpb.ZLabel, 5000)
				for s := 0; s < len(series); s++ {
					lbls := make([]labelpb.ZLabel, 10)
					for i := 0; i < len(lbls); i++ {
						// Label with a ~30B name and a ~50B value.
						lbls[i] = labelpb.ZLabel{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
					}
					series[s] = lbls
				}
				return series
			}()),
		},
		{
			name: "typical labels under 1KB, 20000 of them",
			writeRequest: serializeSeriesWithOneSample(b, func() [][]labelpb.ZLabel {
				series := make([][]labelpb.ZLabel, 20000)
				for s := 0; s < len(series); s++ {
					lbls := make([]labelpb.ZLabel, 10)
					for i := 0; i < len(lbls); i++ {
						// Label with a ~30B name and a ~50B value.
						lbls[i] = labelpb.ZLabel{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
					}
					series[s] = lbls
				}
				return series
			}()),
		},
		{
			name: "extremely large label value 10MB, 10 of them",
			writeRequest: serializeSeriesWithOneSample(b, func() [][]labelpb.ZLabel {
				series := make([][]labelpb.ZLabel, 10)
				for s := 0; s < len(series); s++ {
					lbl := &strings.Builder{}
					lbl.Grow(1024 * 1024 * 10) // 10MB.
					word := "abcdefghij"
					for i := 0; i < lbl.Cap()/len(word); i++ {
						_, _ = lbl.WriteString(word)
					}
					series[s] = []labelpb.ZLabel{{Name: "__name__", Value: lbl.String()}}
				}
				return series
			}()),
		},
	} {
		b.Run(tcase.name, func(b testutil.TB) {
			handler.options.DefaultTenantID = fmt.Sprintf("%v-ok", tcase.name)
			handler.writer.multiTSDB = &tsOverrideTenantStorage{TenantStorage: m, interval: 1}

			// It takes time to create a new tenant, so wait for it.
			{
				app, err := m.TenantAppendable(handler.options.DefaultTenantID)
				testutil.Ok(b, err)

				ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
				defer cancel()

				testutil.Ok(b, runutil.Retry(1*time.Second, ctx.Done(), func() error {
					_, err = app.Appender(ctx)
					return err
				}))
			}

			b.Run("OK", func(b testutil.TB) {
				n := b.N()
				b.ResetTimer()
				for i := 0; i < n; i++ {
					r := httptest.NewRecorder()
					handler.receiveHTTP(r, &http.Request{ContentLength: int64(len(tcase.writeRequest)), Body: io.NopCloser(bytes.NewReader(tcase.writeRequest))})
					testutil.Equals(b, http.StatusOK, r.Code, "got non 200 error: %v", r.Body.String())
				}
			})

			handler.options.DefaultTenantID = fmt.Sprintf("%v-conflicting", tcase.name)
			handler.writer.multiTSDB = &tsOverrideTenantStorage{TenantStorage: m, interval: -1} // Timestamps can't go down, which will cause conflict errors.

			// It takes time to create a new tenant, so wait for it.
			{
				app, err := m.TenantAppendable(handler.options.DefaultTenantID)
				testutil.Ok(b, err)

				ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
				defer cancel()

				testutil.Ok(b, runutil.Retry(1*time.Second, ctx.Done(), func() error {
					_, err = app.Appender(ctx)
					return err
				}))
			}

			// The first request should be fine, since the timestamp has not
			// decreased yet; all subsequent requests will conflict.
			r := httptest.NewRecorder()
			handler.receiveHTTP(r, &http.Request{ContentLength: int64(len(tcase.writeRequest)), Body: io.NopCloser(bytes.NewReader(tcase.writeRequest))})
			testutil.Equals(b, http.StatusOK, r.Code, "got non 200 error: %v", r.Body.String())

			b.Run("conflict errors", func(b testutil.TB) {
				n := b.N()
				b.ResetTimer()
				for i := 0; i < n; i++ {
					r := httptest.NewRecorder()
					handler.receiveHTTP(r, &http.Request{ContentLength: int64(len(tcase.writeRequest)), Body: io.NopCloser(bytes.NewReader(tcase.writeRequest))})
					testutil.Equals(b, http.StatusConflict, r.Code, "%v-%s", i, func() string {
						b, _ := io.ReadAll(r.Body)
						return string(b)
					}())
				}
			})
		})
	}

	runtime.GC()
	// Take a snapshot at the end to reveal how much memory we keep in TSDB.
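	// The written pprof profile can be inspected with `go tool pprof <path>`.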
	testutil.Ok(b, Heap("../../../_dev/thanos/2021/receive2"))
}

// Heap writes a pprof heap profile into the given directory.
func Heap(dir string) (err error) {
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return err
	}

	f, err := os.Create(filepath.Join(dir, "errimpr1-go1.16.3.pprof"))
	if err != nil {
		return err
	}
	defer runutil.CloseWithErrCapture(&err, f, "close")
	return pprof.WriteHeapProfile(f)
}

func TestIsTenantValid(t *testing.T) {
	for _, tcase := range []struct {
		name   string
		tenant string

		expectedErr error
	}{
		{
			name:        "test malicious tenant",
			tenant:      "/etc/foo",
			expectedErr: errors.New("Tenant name not valid"),
		},
		{
			name:        "test malicious tenant going out of receiver directory",
			tenant:      "./../../hacker_dir",
			expectedErr: errors.New("Tenant name not valid"),
		},
		{
			name:        "test slash-only tenant",
			tenant:      "///",
			expectedErr: errors.New("Tenant name not valid"),
		},
		{
			name:   "test default tenant",
			tenant: "default-tenant",
		},
		{
			name:   "test tenant with uuid",
			tenant: "528d0490-8720-4478-aa29-819d90fc9a9f",
		},
		{
			name:   "test valid tenant",
			tenant: "foo",
		},
	} {
		t.Run(tcase.name, func(t *testing.T) {
			err := tenancy.IsTenantValid(tcase.tenant)
			if tcase.expectedErr != nil {
				testutil.NotOk(t, err)
				testutil.Equals(t, tcase.expectedErr.Error(), err.Error())
				return
			}
			testutil.Ok(t, err)
		})
	}
}

func TestRelabel(t *testing.T) {
	for _, tcase := range []struct {
		name                 string
		relabel              []*relabel.Config
		writeRequest         prompb.WriteRequest
		expectedWriteRequest prompb.WriteRequest
	}{
		{
			name: "empty relabel configs",
			writeRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
			expectedWriteRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
		},
		{
			name: "has relabel configs but no relabelling applied",
			relabel: []*relabel.Config{
				{
					SourceLabels: model.LabelNames{"zoo"},
					TargetLabel:  "bar",
					Regex:        relabel.MustNewRegexp("bar"),
					Action:       relabel.Replace,
					Replacement:  "baz",
				},
			},
			writeRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
			expectedWriteRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
		},
		{
			name: "relabel rewrite existing labels",
			relabel: []*relabel.Config{
				{
					TargetLabel: "foo",
					Action:      relabel.Replace,
					Regex:       relabel.MustNewRegexp(""),
					Replacement: "test",
				},
				{
					TargetLabel: "__name__",
					Action:      relabel.Replace,
					Regex:       relabel.MustNewRegexp(""),
					Replacement: "foo",
				},
			},
			writeRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
			expectedWriteRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "foo",
							},
							{
								Name:  "foo",
								Value: "test",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
		},
		{
			name: "relabel drops label",
			relabel: []*relabel.Config{
				{
					Action: relabel.LabelDrop,
					Regex:  relabel.MustNewRegexp("foo"),
				},
			},
			writeRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
			expectedWriteRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
		},
		{
			name: "relabel drops time series",
			relabel: []*relabel.Config{
				{
					SourceLabels: model.LabelNames{"foo"},
					Action:       relabel.Drop,
					Regex:        relabel.MustNewRegexp("bar"),
				},
			},
			writeRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Samples: []prompb.Sample{
							{
								Timestamp: 0,
								Value:     1,
							},
						},
					},
				},
			},
			expectedWriteRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{},
			},
		},
		{
			name: "relabel rewrite existing exemplar series labels",
			relabel: []*relabel.Config{
				{
					Action: relabel.LabelDrop,
					Regex:  relabel.MustNewRegexp("foo"),
				},
			},
			writeRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Exemplars: []prompb.Exemplar{
							{
								Labels: []labelpb.ZLabel{
									{
										Name:  "traceID",
										Value: "foo",
									},
								},
								Value:     1,
								Timestamp: 1,
							},
						},
					},
				},
			},
			expectedWriteRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
						},
						Exemplars: []prompb.Exemplar{
							{
								Labels: []labelpb.ZLabel{
									{
										Name:  "traceID",
										Value: "foo",
									},
								},
								Value:     1,
								Timestamp: 1,
							},
						},
					},
				},
			},
		},
		{
			name: "relabel drops exemplars",
			relabel: []*relabel.Config{
				{
					SourceLabels: model.LabelNames{"foo"},
					Action:       relabel.Drop,
					Regex:        relabel.MustNewRegexp("bar"),
				},
			},
			writeRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{
					{
						Labels: []labelpb.ZLabel{
							{
								Name:  "__name__",
								Value: "test_metric",
							},
							{
								Name:  "foo",
								Value: "bar",
							},
						},
						Exemplars: []prompb.Exemplar{
							{
								Labels: []labelpb.ZLabel{
									{
										Name:  "traceID",
										Value: "foo",
									},
								},
								Value:     1,
								Timestamp: 1,
							},
						},
					},
				},
			},
			expectedWriteRequest: prompb.WriteRequest{
				Timeseries: []prompb.TimeSeries{},
			},
		},
	} {
		t.Run(tcase.name, func(t *testing.T) {
			h := NewHandler(nil, &Options{
				RelabelConfigs: tcase.relabel,
			})

			h.relabel(&tcase.writeRequest)
			testutil.Equals(t, tcase.expectedWriteRequest, tcase.writeRequest)
		})
	}
}

func TestGetStatsLimitParameter(t *testing.T) {
	t.Run("invalid limit parameter, not integer", func(t *testing.T) {
		r, err := http.NewRequest(http.MethodGet, "http://0:0", nil)
		testutil.Ok(t, err)

		q := r.URL.Query()
		q.Add(LimitStatsQueryParam, "abc")
		r.URL.RawQuery = q.Encode()

		_, err = getStatsLimitParameter(r)
		testutil.NotOk(t, err)
	})
	t.Run("invalid limit parameter, too large", func(t *testing.T) {
		r, err := http.NewRequest(http.MethodGet, "http://0:0", nil)
		testutil.Ok(t, err)

		q := r.URL.Query()
		q.Add(LimitStatsQueryParam, strconv.FormatUint(math.MaxInt+1, 10))
		r.URL.RawQuery = q.Encode()

		_, err = getStatsLimitParameter(r)
		testutil.NotOk(t, err)
	})
	t.Run("not present returns default", func(t *testing.T) {
		r, err := http.NewRequest(http.MethodGet, "http://0:0", nil)
		testutil.Ok(t, err)

		limit, err := getStatsLimitParameter(r)
		testutil.Ok(t, err)
		testutil.Equals(t, limit, DefaultStatsLimit)
	})
	t.Run("if present and valid, the parameter is returned", func(t *testing.T) {
		r, err := http.NewRequest(http.MethodGet, "http://0:0", nil)
		testutil.Ok(t, err)

		const givenLimit = 20

		q := r.URL.Query()
		q.Add(LimitStatsQueryParam, strconv.FormatUint(givenLimit, 10))
		r.URL.RawQuery = q.Encode()

		limit, err := getStatsLimitParameter(r)
		testutil.Ok(t, err)
		testutil.Equals(t, limit, givenLimit)
	})
}
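
// Minimal usage sketch (a hypothetical test added for illustration, not part
// of the upstream suite): cycleErrors hands out the given errors round-robin
// and wraps around after the last one.
func TestCycleErrorsWrapsAround(t *testing.T) {
	errA, errB := errors.New("a"), errors.New("b")
	next := cycleErrors([]error{errA, errB})
	testutil.Equals(t, errA, next()) // First error.
	testutil.Equals(t, errB, next()) // Second error.
	testutil.Equals(t, errA, next()) // Wraps back to the first.
}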