github.com/uber-go/tally/v4@v4.1.17/m3/reporter_test.go

// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package m3

import (
	"bytes"
	"math/rand"
	"net"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	tally "github.com/uber-go/tally/v4"
	customtransport "github.com/uber-go/tally/v4/m3/customtransports"
	m3thrift "github.com/uber-go/tally/v4/m3/thrift/v2"
	"github.com/uber-go/tally/v4/m3/thriftudp"
	"github.com/uber-go/tally/v4/thirdparty/github.com/apache/thrift/lib/go/thrift"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	queueSize     = 1000
	includeHost   = true
	maxPacketSize = int32(1440)
	shortInterval = 10 * time.Millisecond
)

var (
	localListenAddr   = &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}
	defaultCommonTags = map[string]string{"env": "test", "host": "test"}
)

var protocols = []Protocol{Compact, Binary}

const internalMetrics = 5    // Additional metrics the reporter sends in a batch; use this, not a magic number.
const cardinalityMetrics = 4 // Additional metrics emitted by the scope registry.
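// The tests below exercise the reporter the way production code uses it: a
// cached reporter wired into a tally root scope that flushes on an interval.
// A minimal sketch of that wiring (the host:port and flush interval are
// illustrative assumptions, not values taken from these tests):
//
//	r, err := NewReporter(Options{
//		HostPorts:  []string{"127.0.0.1:9052"},
//		Service:    "my-service",
//		CommonTags: map[string]string{"env": "test"},
//	})
//	if err != nil {
//		panic(err)
//	}
//	defer r.Close()
//
//	scope, closer := tally.NewRootScope(tally.ScopeOptions{
//		CachedReporter: r,
//	}, time.Second)
//	defer closer.Close()
//	scope.Counter("requests").Inc(1)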
// TestReporter tests that the reporter works as expected with both the
// compact and binary protocols.
func TestReporter(t *testing.T) {
	for _, protocol := range protocols {
		var wg sync.WaitGroup
		server := newFakeM3Server(t, &wg, true, protocol)
		go server.Serve()
		defer server.Close()

		commonTags := map[string]string{
			"env":        "development",
			"host":       hostname(),
			"commonTag":  "common",
			"commonTag2": "tag",
			"commonTag3": "val",
		}
		r, err := NewReporter(Options{
			HostPorts:          []string{server.Addr},
			Service:            "test-service",
			CommonTags:         commonTags,
			IncludeHost:        includeHost,
			Protocol:           protocol,
			MaxQueueSize:       queueSize,
			MaxPacketSizeBytes: maxPacketSize,
		})
		require.NoError(t, err)
		defer func() {
			assert.NoError(t, r.Close())
		}()

		tags := map[string]string{"testTag": "TestValue", "testTag2": "TestValue2"}

		wg.Add(2)

		r.AllocateCounter("my-counter", tags).ReportCount(10)
		r.Flush()

		r.AllocateTimer("my-timer", tags).ReportTimer(5 * time.Millisecond)
		r.Flush()

		wg.Wait()

		batches := server.Service.getBatches()
		require.Equal(t, 2, len(batches))

		// Validate common tags
		for _, batch := range batches {
			require.NotNil(t, batch)
			require.True(t, batch.IsSetCommonTags())
			require.Equal(t, len(commonTags)+1, len(batch.GetCommonTags()))
			for _, tag := range batch.GetCommonTags() {
				if tag.GetName() == ServiceTag {
					require.Equal(t, "test-service", tag.GetValue())
				} else {
					require.Equal(t, commonTags[tag.GetName()], tag.GetValue())
				}
			}
		}

		// Validate metrics
		emittedCounters := batches[0].GetMetrics()
		require.Equal(t, internalMetrics+1, len(emittedCounters))
		emittedTimers := batches[1].GetMetrics()
		require.Equal(t, internalMetrics+1, len(emittedTimers))

		emittedCounter, emittedTimer := emittedCounters[0], emittedTimers[0]
		if emittedCounter.GetName() == "my-timer" {
			emittedCounter, emittedTimer = emittedTimer, emittedCounter
		}

		require.Equal(t, "my-counter", emittedCounter.GetName())
		require.True(t, emittedCounter.IsSetTags())
		require.Equal(t, len(tags), len(emittedCounter.GetTags()))
		for _, tag := range emittedCounter.GetTags() {
			require.Equal(t, tags[tag.GetName()], tag.GetValue())
		}
		require.True(t, emittedCounter.IsSetValue())
		emittedVal := emittedCounter.GetValue()
		emittedCount := emittedVal.GetCount()
		require.EqualValues(t, int64(10), emittedCount)

		require.True(t, emittedTimer.IsSetValue())
		emittedVal = emittedTimer.GetValue()
		emittedTimerVal := emittedVal.GetTimer()
		require.EqualValues(t, int64(5*1000*1000), emittedTimerVal)
	}
}

// TestMultiReporter tests that the multi-destination reporter works as
// expected.
func TestMultiReporter(t *testing.T) {
	dests := []string{"127.0.0.1:9052", "127.0.0.1:9053"}
	commonTags := map[string]string{
		"env":        "test",
		"host":       "test",
		"commonTag":  "common",
		"commonTag2": "tag",
		"commonTag3": "val",
	}
	r, err := NewReporter(Options{
		HostPorts:  dests,
		Service:    "test-service",
		CommonTags: commonTags,
	})
	require.NoError(t, err)
	defer r.Close()

	reporter, ok := r.(*reporter)
	require.True(t, ok)
	multitransport, ok := reporter.client.Transport.(*thriftudp.TMultiUDPTransport)
	require.NotNil(t, multitransport)
	require.True(t, ok)
}
// TestNewReporterErrors tests for Reporter creation errors.
func TestNewReporterErrors(t *testing.T) {
	var err error
	// Test freeBytes (maxPacketSizeBytes - numOverheadBytes) is negative
	_, err = NewReporter(Options{
		HostPorts:          []string{"127.0.0.1"},
		Service:            "test-service",
		MaxQueueSize:       10,
		MaxPacketSizeBytes: 2 << 5,
	})
	assert.Error(t, err)
	// Test invalid addr
	_, err = NewReporter(Options{
		HostPorts: []string{"fakeAddress"},
		Service:   "test-service",
	})
	assert.Error(t, err)
}

// TestReporterRaceCondition checks whether there is a race condition between
// closing the reporter and reporting a metric; when run with the race
// detector on, this test should pass.
func TestReporterRaceCondition(t *testing.T) {
	r, err := NewReporter(Options{
		HostPorts:          []string{"localhost:8888"},
		Service:            "test-service",
		CommonTags:         defaultCommonTags,
		MaxQueueSize:       queueSize,
		MaxPacketSizeBytes: maxPacketSize,
	})
	require.NoError(t, err)

	go func() {
		r.AllocateTimer("my-timer", nil).ReportTimer(10 * time.Millisecond)
	}()
	r.Close()
}

// TestReporterFinalFlush ensures the reporter emits the last batch of metrics
// after Close.
func TestReporterFinalFlush(t *testing.T) {
	var wg sync.WaitGroup
	server := newFakeM3Server(t, &wg, true, Compact)
	go server.Serve()
	defer server.Close()

	r, err := NewReporter(Options{
		HostPorts:          []string{server.Addr},
		Service:            "test-service",
		CommonTags:         defaultCommonTags,
		MaxQueueSize:       queueSize,
		MaxPacketSizeBytes: maxPacketSize,
	})
	require.NoError(t, err)

	wg.Add(1)

	r.AllocateTimer("my-timer", nil).ReportTimer(10 * time.Millisecond)
	r.Close()

	wg.Wait()

	require.Equal(t, 1, len(server.Service.getBatches()))
	require.NotNil(t, server.Service.getBatches()[0])
	require.Equal(t, 1, len(server.Service.getBatches()[0].GetMetrics()))
}

// TestReporterNoPanicOnTimerAfterClose ensures the reporter does not panic
// when a timer value is emitted after the reporter has been closed.
func TestReporterNoPanicOnTimerAfterClose(t *testing.T) {
	server := newFakeM3Server(t, &sync.WaitGroup{}, true, Compact)
	go server.Serve()
	defer server.Close()

	r, err := NewReporter(Options{
		HostPorts:          []string{server.Addr},
		Service:            "test-service",
		CommonTags:         defaultCommonTags,
		MaxQueueSize:       queueSize,
		MaxPacketSizeBytes: maxPacketSize,
	})
	require.NoError(t, err)

	timer := r.AllocateTimer("my-timer", nil)
	r.Close()

	assert.NotPanics(t, func() {
		timer.ReportTimer(time.Millisecond)
	})
}

// TestReporterNoPanicOnFlushAfterClose ensures the reporter does not panic
// when Flush is called after the reporter has been closed.
func TestReporterNoPanicOnFlushAfterClose(t *testing.T) {
	server := newFakeM3Server(t, &sync.WaitGroup{}, true, Compact)
	go server.Serve()
	defer server.Close()

	r, err := NewReporter(Options{
		HostPorts:          []string{server.Addr},
		Service:            "test-service",
		CommonTags:         defaultCommonTags,
		MaxQueueSize:       queueSize,
		MaxPacketSizeBytes: maxPacketSize,
	})
	require.NoError(t, err)
	r.Close()

	assert.NotPanics(t, func() {
		r.Flush()
	})
}
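// TestReporterHistogram below asserts the M3 wire encoding of histograms:
// each bucket is reported as a counter of the samples in that bucket, tagged
// with "bucketid" (a zero-padded ordinal) and "bucket" (a human-readable
// range such as "0-25ms"). A sketch of the resulting shape on the wire,
// inferred from the assertions below rather than from upstream documentation:
//
//	my-histogram{foo="bar", bucketid="0001", bucket="0-25ms"}    count=7
//	my-histogram{foo="bar", bucketid="0003", bucket="50ms-75ms"} count=3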
func TestReporterHistogram(t *testing.T) {
	var wg sync.WaitGroup
	server := newFakeM3Server(t, &wg, true, Compact)
	go server.Serve()
	defer server.Close()

	r, err := NewReporter(Options{
		HostPorts:          []string{server.Addr},
		Service:            "test-service",
		CommonTags:         defaultCommonTags,
		MaxQueueSize:       queueSize,
		MaxPacketSizeBytes: maxPacketSize,
	})
	require.NoError(t, err)

	wg.Add(1)

	h := r.AllocateHistogram("my-histogram", map[string]string{
		"foo": "bar",
	}, tally.DurationBuckets{
		0 * time.Millisecond,
		25 * time.Millisecond,
		50 * time.Millisecond,
		75 * time.Millisecond,
		100 * time.Millisecond,
	})
	b := h.DurationBucket(0*time.Millisecond, 25*time.Millisecond)
	b.ReportSamples(7)
	b = h.DurationBucket(50*time.Millisecond, 75*time.Millisecond)
	b.ReportSamples(3)
	r.Close()

	wg.Wait()

	require.Equal(t, 1, len(server.Service.getBatches()))
	require.NotNil(t, server.Service.getBatches()[0])
	require.Equal(t, 2, len(server.Service.getBatches()[0].GetMetrics()))

	// Verify first bucket
	counter := server.Service.getBatches()[0].GetMetrics()[0]
	require.Equal(t, "my-histogram", counter.GetName())
	require.True(t, counter.IsSetTags())
	for _, tag := range counter.GetTags() {
		require.Equal(t, map[string]string{
			"foo":      "bar",
			"bucketid": "0001",
			"bucket":   "0-25ms",
		}[tag.GetName()], tag.GetValue())
	}
	require.Equal(t, 3, len(counter.GetTags()))
	require.True(t, counter.IsSetValue())
	val := counter.GetValue()
	count := val.GetCount()
	require.Equal(t, int64(7), count)

	// Verify second bucket
	counter = server.Service.getBatches()[0].GetMetrics()[1]
	require.Equal(t, "my-histogram", counter.GetName())
	require.True(t, counter.IsSetTags())
	require.Equal(t, 3, len(counter.GetTags()))
	for _, tag := range counter.GetTags() {
		require.Equal(t, map[string]string{
			"foo":      "bar",
			"bucketid": "0003",
			"bucket":   "50ms-75ms",
		}[tag.GetName()], tag.GetValue())
	}
	require.True(t, counter.IsSetValue())
	val = counter.GetValue()
	count = val.GetCount()
	require.Equal(t, int64(3), count)
}
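// TestBatchSizes below floods the reporter with randomly named and tagged
// metrics and asserts that no emitted UDP packet exceeds MaxPacketSizeBytes.
// The 1440-byte limit is presumably chosen to keep each datagram under a
// typical 1500-byte Ethernet MTU once IP and UDP headers are added; that
// rationale is an assumption, the test only asserts the configured bound.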
func TestBatchSizes(t *testing.T) {
	server := newFakeM3Server(t, nil, false, Compact)
	go server.Serve()
	defer server.Close()

	commonTags := map[string]string{
		"env":    "test",
		"domain": "pod" + strconv.Itoa(rand.Intn(100)),
	}
	maxPacketSize := int32(1440)
	r, err := NewReporter(Options{
		HostPorts:          []string{server.Addr},
		Service:            "test-service",
		CommonTags:         commonTags,
		MaxQueueSize:       10000,
		MaxPacketSizeBytes: maxPacketSize,
	})

	require.NoError(t, err)
	rand.Seed(time.Now().UnixNano())

	var stop uint32
	go func() {
		var (
			counters = make(map[string]tally.CachedCount)
			gauges   = make(map[string]tally.CachedGauge)
			timers   = make(map[string]tally.CachedTimer)
			randTags = func() map[string]string {
				return map[string]string{
					"t1": "val" + strconv.Itoa(rand.Intn(10000)),
				}
			}
		)
		for atomic.LoadUint32(&stop) == 0 {
			metTypeRand := rand.Intn(9)
			name := "size.test.metric.name" + strconv.Itoa(rand.Intn(50))

			if metTypeRand <= 2 {
				_, ok := counters[name]
				if !ok {
					counters[name] = r.AllocateCounter(name, randTags())
				}
				counters[name].ReportCount(rand.Int63n(10000))
			} else if metTypeRand <= 5 {
				_, ok := gauges[name]
				if !ok {
					gauges[name] = r.AllocateGauge(name, randTags())
				}
				gauges[name].ReportGauge(rand.Float64() * 10000)
			} else {
				_, ok := timers[name]
				if !ok {
					timers[name] = r.AllocateTimer(name, randTags())
				}
				timers[name].ReportTimer(time.Duration(rand.Int63n(10000)))
			}
		}
		r.Close()
	}()

	for len(server.Packets()) < 100 {
		time.Sleep(shortInterval)
	}

	atomic.StoreUint32(&stop, 1)
	for _, packet := range server.Packets() {
		require.True(t, len(packet) < int(maxPacketSize))
	}
}

func TestReporterSpecifyService(t *testing.T) {
	commonTags := map[string]string{
		ServiceTag: "overrideService",
		EnvTag:     "test",
		HostTag:    "overrideHost",
	}
	r, err := NewReporter(Options{
		HostPorts:          []string{"127.0.0.1:1000"},
		Service:            "test-service",
		CommonTags:         commonTags,
		IncludeHost:        includeHost,
		MaxQueueSize:       10,
		MaxPacketSizeBytes: 100,
	})
	require.NoError(t, err)
	defer r.Close()

	reporter, ok := r.(*reporter)
	require.True(t, ok)
	assert.Equal(t, 3, len(reporter.commonTags))
	for _, tag := range reporter.commonTags {
		switch tag.GetName() {
		case ServiceTag:
			assert.Equal(t, "overrideService", tag.GetValue())
		case EnvTag:
			assert.Equal(t, "test", tag.GetValue())
		case HostTag:
			assert.Equal(t, "overrideHost", tag.GetValue())
		}
	}
}

func TestIncludeHost(t *testing.T) {
	var wg sync.WaitGroup
	server := newFakeM3Server(t, &wg, true, Compact)
	go server.Serve()
	defer server.Close()

	commonTags := map[string]string{"env": "test"}
	r, err := NewReporter(Options{
		HostPorts:   []string{server.Addr},
		Service:     "test-service",
		CommonTags:  commonTags,
		IncludeHost: false,
	})
	require.NoError(t, err)
	defer r.Close()
	withoutHost, ok := r.(*reporter)
	require.True(t, ok)
	assert.False(t, tagIncluded(withoutHost.commonTags, "host"))

	r, err = NewReporter(Options{
		HostPorts:   []string{server.Addr},
		Service:     "test-service",
		CommonTags:  commonTags,
		IncludeHost: true,
	})
	require.NoError(t, err)
	defer r.Close()
	withHost, ok := r.(*reporter)
	require.True(t, ok)
	assert.True(t, tagIncluded(withHost.commonTags, "host"))
}

func TestReporterResetTagsAfterReturnToPool(t *testing.T) {
	var wg sync.WaitGroup
	server := newFakeM3Server(t, &wg, false, Compact)
	go server.Serve()
	defer server.Close()

	r, err := NewReporter(Options{
		HostPorts:          []string{server.Addr},
		Service:            "test-service",
		CommonTags:         defaultCommonTags,
		MaxQueueSize:       queueSize,
		MaxPacketSizeBytes: maxPacketSize,
	})
	require.NoError(t, err)
	defer r.Close()

	// Intentionally allocate and leak counters to exhaust the metric pool.
	for i := 0; i < metricPoolSize-2; i++ {
		r.AllocateCounter("placeholder", nil)
	}

	// Allocate a tagged counter so that only one slot remains in the pool.
	tags := map[string]string{"tagName1": "tagValue1"}
	c1 := r.AllocateCounter("counterWithTags", tags)

	// Report the counter with tags to take the last slot.
	wg.Add(internalMetrics + 1)
	c1.ReportCount(1)
	r.Flush()
	wg.Wait()

	// Empty flush to ensure the copied metric is released.
	wg.Add(internalMetrics)
	r.Flush()
	for {
		rep := r.(*reporter)
		if len(rep.metCh) == 0 {
			break
		}
		time.Sleep(5 * time.Millisecond)
	}

	// Allocate a new counter with no tags, reusing the metric just released
	// to the pool.
	c2 := r.AllocateCounter("counterWithNoTags", nil)

	// Report the counter with no tags.
	wg.Add(internalMetrics + 1)
	c2.ReportCount(1)
	r.Flush()
	wg.Wait()

	// Verify that the first reported counter has tags and the second
	// reported counter has no tags.
	metrics := server.Service.getMetrics()
	require.Equal(t, 2+3*internalMetrics, len(metrics)) // 2 test metrics, 3 rounds of internal metrics

	var filtered []m3thrift.Metric
	for _, metric := range metrics {
		if !strings.HasPrefix(metric.Name, "tally.internal") {
			filtered = append(filtered, metric)
		}
	}
	require.Equal(t, 2, len(filtered))

	require.Equal(t, len(tags), len(filtered[0].GetTags()))
	for _, tag := range filtered[0].GetTags() {
		require.Equal(t, tags[tag.GetName()], tag.GetValue())
	}
	require.Equal(t, 0, len(filtered[1].GetTags()))
}
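// Alongside user metrics the reporter emits its own health metrics, named
// with a "tally.internal" prefix; the internalMetrics constant above tracks
// how many accompany each flush, which is why tests add internalMetrics to
// their WaitGroup counts. A sketch of the filtering idiom the tests above and
// below rely on:
//
//	if strings.HasPrefix(metric.Name, "tally.internal") {
//		// reporter self-telemetry
//	} else {
//		// user-reported metric
//	}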
func TestReporterCommmonTagsInternal(t *testing.T) {
	var wg sync.WaitGroup
	server := newFakeM3Server(t, &wg, false, Compact)
	go server.Serve()
	defer server.Close()

	internalTags := map[string]string{
		"internal1": "test1",
		"internal2": "test2",
	}

	r, err := NewReporter(Options{
		HostPorts:          []string{server.Addr},
		Service:            "test-service",
		CommonTags:         defaultCommonTags,
		MaxQueueSize:       queueSize,
		IncludeHost:        true,
		MaxPacketSizeBytes: maxPacketSize,
		InternalTags:       internalTags,
	})
	require.NoError(t, err)
	defer r.Close()

	c := r.AllocateCounter("testCounter1", nil)
	c.ReportCount(1)
	wg.Add(internalMetrics + 1)
	r.Flush()
	wg.Wait()

	numInternalMetricsActual := 0
	metrics := server.Service.getMetrics()
	require.Equal(t, internalMetrics+1, len(metrics))
	for _, metric := range metrics {
		if strings.HasPrefix(metric.Name, "tally.internal") {
			numInternalMetricsActual++
			for k, v := range internalTags {
				require.True(t, tagEquals(metric.Tags, k, v))
			}

			// The following tags should be redacted.
			require.True(t, tagEquals(metric.Tags, "host", tally.DefaultTagRedactValue))
			require.True(t, tagEquals(metric.Tags, "instance", tally.DefaultTagRedactValue))
		} else {
			require.Equal(t, "testCounter1", metric.Name)
			require.False(t, tagIncluded(metric.Tags, "internal1"))
			require.False(t, tagIncluded(metric.Tags, "internal2"))
		}

		// The following tags should not be present as part of the individual
		// metrics as they are common tags.
		require.False(t, tagIncluded(metric.Tags, "service"))
	}
	require.Equal(t, internalMetrics, numInternalMetricsActual)
}

func TestReporterHasReportingAndTaggingCapability(t *testing.T) {
	r, err := NewReporter(Options{
		HostPorts:  []string{"127.0.0.1:9052"},
		Service:    "test-service",
		CommonTags: defaultCommonTags,
	})
	require.NoError(t, err)

	assert.True(t, r.Capabilities().Reporting())
	assert.True(t, r.Capabilities().Tagging())
}

type fakeM3Server struct {
	t         *testing.T
	Service   *fakeM3Service
	Addr      string
	protocol  Protocol
	processor thrift.TProcessor
	conn      *net.UDPConn
	closed    int32
	packets   fakeM3ServerPackets
}

type fakeM3ServerPackets struct {
	sync.RWMutex
	values [][]byte
}
// newFakeM3Server creates a new fake M3 server that listens on a random port
// and returns the server.
// If countBatches is true, the server treats each wg.Add() as representing a
// batch and calls wg.Done() for each batch it receives; if countBatches is
// false, it does the same for individual metrics instead of batches.
func newFakeM3Server(t *testing.T, wg *sync.WaitGroup, countBatches bool, protocol Protocol) *fakeM3Server {
	service := newFakeM3Service(wg, countBatches)
	processor := m3thrift.NewM3Processor(service)
	conn, err := net.ListenUDP(localListenAddr.Network(), localListenAddr)
	require.NoError(t, err, "ListenUDP failed")

	return &fakeM3Server{
		t:         t,
		Service:   service,
		Addr:      conn.LocalAddr().String(),
		conn:      conn,
		protocol:  protocol,
		processor: processor,
	}
}

func (f *fakeM3Server) Serve() {
	readBuf := make([]byte, 64000)
	for f.conn != nil {
		n, err := f.conn.Read(readBuf)
		if err != nil {
			if atomic.LoadInt32(&f.closed) == 0 {
				f.t.Errorf("FakeM3Server failed to Read: %v", err)
			}
			return
		}

		f.packets.Lock()
		f.packets.values = append(f.packets.values, readBuf[0:n])
		f.packets.Unlock()

		trans, _ := customtransport.NewTBufferedReadTransport(bytes.NewBuffer(readBuf[0:n]))
		var proto thrift.TProtocol
		if f.protocol == Compact {
			proto = thrift.NewTCompactProtocol(trans)
		} else {
			proto = thrift.NewTBinaryProtocolTransport(trans)
		}

		_, err = f.processor.Process(proto, proto)
		if terr, ok := err.(thrift.TTransportException); ok {
			require.Equal(f.t, thrift.END_OF_FILE, terr.TypeId())
		} else {
			require.NoError(f.t, err)
		}
	}
}

func (f *fakeM3Server) Close() error {
	atomic.AddInt32(&f.closed, 1)
	return f.conn.Close()
}

func (f *fakeM3Server) Packets() [][]byte {
	f.packets.Lock()
	defer f.packets.Unlock()

	packets := make([][]byte, len(f.packets.values))
	copy(packets, f.packets.values)
	return packets
}

func newFakeM3Service(wg *sync.WaitGroup, countBatches bool) *fakeM3Service {
	return &fakeM3Service{wg: wg, countBatches: countBatches}
}

type fakeM3Service struct {
	lock         sync.RWMutex
	batches      []m3thrift.MetricBatch
	metrics      []m3thrift.Metric
	wg           *sync.WaitGroup
	countBatches bool
}

func (m *fakeM3Service) getBatches() []m3thrift.MetricBatch {
	m.lock.RLock()
	defer m.lock.RUnlock()
	return m.batches
}

func (m *fakeM3Service) getMetrics() []m3thrift.Metric {
	m.lock.RLock()
	defer m.lock.RUnlock()
	return m.metrics
}

func (m *fakeM3Service) EmitMetricBatchV2(batch m3thrift.MetricBatch) (err error) {
	m.lock.Lock()
	m.batches = append(m.batches, batch)
	if m.wg != nil && m.countBatches {
		m.wg.Done()
	}

	for _, metric := range batch.Metrics {
		m.metrics = append(m.metrics, metric)
		if m.wg != nil && !m.countBatches {
			m.wg.Done()
		}
	}

	m.lock.Unlock()
	return thrift.NewTTransportException(thrift.END_OF_FILE, "complete")
}

func hostname() string {
	host, err := os.Hostname()
	if err != nil {
		host = "unknown"
	}
	return host
}

func tagIncluded(tags []m3thrift.MetricTag, tagName string) bool {
	for _, tag := range tags {
		if tag.Name == tagName {
			return true
		}
	}
	return false
}
func tagEquals(tags []m3thrift.MetricTag, tagName string, tagValue string) bool {
	for _, tag := range tags {
		if tag.GetName() == tagName && tag.GetValue() == tagValue {
			return true
		}
	}
	return false
}