github.com/uber-go/tally/v4@v4.1.17/m3/reporter.go

// Copyright (c) 2024 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package m3

import (
	"fmt"
	"io"
	"math"
	"os"
	"runtime"
	"sort"
	"strconv"
	"sync"
	"time"

	"github.com/pkg/errors"
	tally "github.com/uber-go/tally/v4"
	"github.com/uber-go/tally/v4/internal/cache"
	customtransport "github.com/uber-go/tally/v4/m3/customtransports"
	m3thrift "github.com/uber-go/tally/v4/m3/thrift/v2"
	"github.com/uber-go/tally/v4/m3/thriftudp"
	"github.com/uber-go/tally/v4/thirdparty/github.com/apache/thrift/lib/go/thrift"
	"go.uber.org/atomic"
)
// Protocol describes an M3 thrift transport protocol.
type Protocol int

// Compact and Binary represent the compact and
// binary thrift protocols respectively.
const (
	Compact Protocol = iota
	Binary
)

const (
	// ServiceTag is the name of the M3 service tag.
	ServiceTag = "service"
	// EnvTag is the name of the M3 env tag.
	EnvTag = "env"
	// HostTag is the name of the M3 host tag.
	HostTag = "host"
	// DefaultMaxQueueSize is the default M3 reporter queue size.
	DefaultMaxQueueSize = 4096
	// DefaultMaxPacketSize is the default M3 reporter max packet size.
	DefaultMaxPacketSize = int32(32768)
	// DefaultHistogramBucketIDName is the default histogram bucket ID tag name.
	DefaultHistogramBucketIDName = "bucketid"
	// DefaultHistogramBucketName is the default histogram bucket name tag name.
	DefaultHistogramBucketName = "bucket"
	// DefaultHistogramBucketTagPrecision is the default
	// precision to use when formatting the metric tag
	// with the histogram bucket bound values.
	DefaultHistogramBucketTagPrecision = uint(6)

	_emitMetricBatchOverhead    = 19
	_minMetricBucketIDTagLength = 4
	_timeResolution             = 100 * time.Millisecond
)
var (
	_maxInt64   = int64(math.MaxInt64)
	_maxFloat64 = math.MaxFloat64
)

type metricType int

const (
	counterType metricType = iota + 1
	timerType
	gaugeType
)

var (
	errNoHostPorts   = errors.New("at least one entry for HostPorts is required")
	errCommonTagSize = errors.New("common tags serialized size exceeds packet size")
	errAlreadyClosed = errors.New("reporter already closed")
)

// Reporter is an M3 reporter.
type Reporter interface {
	tally.CachedStatsReporter
	io.Closer
}
// reporter is a metrics backend that reports metrics to a local or
// remote M3 collector. Metrics are batched together and emitted
// via either the thrift compact or binary protocol in batch UDP packets.
type reporter struct {
	bucketIDTagName string
	bucketTagName   string
	bucketValFmt    string
	buckets         []tally.BucketPair
	calc            *customtransport.TCalcTransport
	calcLock        sync.Mutex
	calcProto       thrift.TProtocol
	client          *m3thrift.M3Client
	commonTags      []m3thrift.MetricTag
	done            atomic.Bool
	donech          chan struct{}
	freeBytes       int32
	metCh           chan sizedMetric
	now             atomic.Int64
	overheadBytes   int32
	pending         atomic.Uint64
	resourcePool    *resourcePool
	stringInterner  *cache.StringInterner
	tagCache        *cache.TagCache
	wg              sync.WaitGroup

	batchSizeHistogram    tally.CachedHistogram
	numBatches            atomic.Int64
	numBatchesCounter     tally.CachedCount
	numMetrics            atomic.Int64
	numMetricsCounter     tally.CachedCount
	numWriteErrors        atomic.Int64
	numWriteErrorsCounter tally.CachedCount
	numTagCacheCounter    tally.CachedCount
}
// Options is a set of options for the M3 reporter.
type Options struct {
	// HostPorts are the host:port addresses of the M3 collector(s) to emit to.
	HostPorts []string
	// Service is the value of the "service" common tag
	// (required unless already present in CommonTags).
	Service string
	// Env is the value of the "env" common tag
	// (required unless already present in CommonTags).
	Env string
	// CommonTags are tags attached to every emitted metric batch.
	CommonTags map[string]string
	// IncludeHost, if set, adds a "host" common tag using os.Hostname().
	IncludeHost bool
	// Protocol selects the thrift protocol (Compact or Binary).
	Protocol Protocol
	// MaxQueueSize bounds the reporter's metric queue.
	MaxQueueSize int
	// MaxPacketSizeBytes caps the size of each emitted UDP packet.
	MaxPacketSizeBytes int32
	// HistogramBucketIDName and HistogramBucketName override the default
	// histogram bucket tag names.
	HistogramBucketIDName string
	HistogramBucketName   string
	// HistogramBucketTagPrecision is the precision used when formatting
	// histogram bucket bounds as tag values.
	HistogramBucketTagPrecision uint
	// InternalTags are extra tags applied to the reporter's internal metrics.
	InternalTags map[string]string
}

// NewReporter creates a new M3 reporter.
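//
// A minimal usage sketch (the address and tag values below are
// placeholders, not defaults):
//
//	r, err := NewReporter(Options{
//		HostPorts: []string{"127.0.0.1:9052"},
//		Service:   "my-service",
//		Env:       "test",
//	})
//	if err != nil {
//		panic(err)
//	}
//	defer r.Close()
//
// The returned Reporter can then back a tally scope, e.g. via
// tally.NewRootScope with the CachedReporter option set to r.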
func NewReporter(opts Options) (Reporter, error) {
	if opts.MaxQueueSize <= 0 {
		opts.MaxQueueSize = DefaultMaxQueueSize
	}
	if opts.MaxPacketSizeBytes <= 0 {
		opts.MaxPacketSizeBytes = DefaultMaxPacketSize
	}
	if opts.HistogramBucketIDName == "" {
		opts.HistogramBucketIDName = DefaultHistogramBucketIDName
	}
	if opts.HistogramBucketName == "" {
		opts.HistogramBucketName = DefaultHistogramBucketName
	}
	if opts.HistogramBucketTagPrecision == 0 {
		opts.HistogramBucketTagPrecision = DefaultHistogramBucketTagPrecision
	}

	// Create M3 thrift client
	var trans thrift.TTransport
	var err error
	if len(opts.HostPorts) == 0 {
		err = errNoHostPorts
	} else if len(opts.HostPorts) == 1 {
		trans, err = thriftudp.NewTUDPClientTransport(opts.HostPorts[0], "")
	} else {
		trans, err = thriftudp.NewTMultiUDPClientTransport(opts.HostPorts, "")
	}
	if err != nil {
		return nil, err
	}

	var protocolFactory thrift.TProtocolFactory
	if opts.Protocol == Compact {
		protocolFactory = thrift.NewTCompactProtocolFactory()
	} else {
		protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
	}

	var (
		client       = m3thrift.NewM3ClientFactory(trans, protocolFactory)
		resourcePool = newResourcePool(protocolFactory)
		tagm         = make(map[string]string)
		tags         = resourcePool.getMetricTagSlice()
	)

	// Create common tags
	for k, v := range opts.CommonTags {
		tagm[k] = v
	}

	if opts.CommonTags[ServiceTag] == "" {
		if opts.Service == "" {
			return nil, fmt.Errorf("%s common tag is required", ServiceTag)
		}
		tagm[ServiceTag] = opts.Service
	}

	if opts.CommonTags[EnvTag] == "" {
		if opts.Env == "" {
			return nil, fmt.Errorf("%s common tag is required", EnvTag)
		}
		tagm[EnvTag] = opts.Env
	}

	if opts.IncludeHost {
		if opts.CommonTags[HostTag] == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return nil, errors.WithMessage(err, "error resolving host tag")
			}
			tagm[HostTag] = hostname
		}
	}

	for k, v := range tagm {
		tags = append(tags, m3thrift.MetricTag{
			Name:  k,
			Value: v,
		})
	}

	// Calculate size of common tags
	var (
		batch = m3thrift.MetricBatch{
			Metrics:    resourcePool.getMetricSlice(),
			CommonTags: tags,
		}
		proto = resourcePool.getProto()
	)

	if err := batch.Write(proto); err != nil {
		return nil, errors.WithMessage(
			err,
			"failed to write to proto for size calculation",
		)
	}

	resourcePool.releaseMetricSlice(batch.Metrics)

	var (
		calc             = proto.Transport().(*customtransport.TCalcTransport)
		numOverheadBytes = _emitMetricBatchOverhead + calc.GetCount()
		freeBytes        = opts.MaxPacketSizeBytes - numOverheadBytes
	)
	calc.ResetCount()

	if freeBytes <= 0 {
		return nil, errCommonTagSize
	}

	buckets := tally.ValueBuckets(append(
		[]float64{0.0},
		tally.MustMakeExponentialValueBuckets(2.0, 2.0, 11)...,
	))

	r := &reporter{
		buckets:         tally.BucketPairs(buckets),
		bucketIDTagName: opts.HistogramBucketIDName,
		bucketTagName:   opts.HistogramBucketName,
		bucketValFmt:    "%." + strconv.Itoa(int(opts.HistogramBucketTagPrecision)) + "f",
		calc:            calc,
		calcProto:       proto,
		client:          client,
		commonTags:      tags,
		donech:          make(chan struct{}),
		freeBytes:       freeBytes,
		metCh:           make(chan sizedMetric, opts.MaxQueueSize),
		overheadBytes:   numOverheadBytes,
		resourcePool:    resourcePool,
		stringInterner:  cache.NewStringInterner(),
		tagCache:        cache.NewTagCache(),
	}

	internalTags := map[string]string{
		"version":  tally.Version,
		"host":     tally.DefaultTagRedactValue,
		"instance": tally.DefaultTagRedactValue,
	}

	for k, v := range opts.InternalTags {
		internalTags[k] = v
	}

	r.batchSizeHistogram = r.AllocateHistogram("tally.internal.batch-size", internalTags, buckets)
	r.numBatchesCounter = r.AllocateCounter("tally.internal.num-batches", internalTags)
	r.numMetricsCounter = r.AllocateCounter("tally.internal.num-metrics", internalTags)
	r.numWriteErrorsCounter = r.AllocateCounter("tally.internal.num-write-errors", internalTags)
	r.numTagCacheCounter = r.AllocateCounter("tally.internal.num-tag-cache", internalTags)
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		r.process()
	}()

	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		r.timeLoop()
	}()

	return r, nil
}

// AllocateCounter implements tally.CachedStatsReporter.
func (r *reporter) AllocateCounter(
	name string,
	tags map[string]string,
) tally.CachedCount {
	return r.allocateCounter(name, tags)
}

func (r *reporter) allocateCounter(
	name string,
	tags map[string]string,
) cachedMetric {
	var (
		counter = r.newMetric(name, tags, counterType)
		size    = r.calculateSize(counter)
	)

	return cachedMetric{
		metric:   counter,
		reporter: r,
		size:     size,
	}
}

// AllocateGauge implements tally.CachedStatsReporter.
func (r *reporter) AllocateGauge(
	name string,
	tags map[string]string,
) tally.CachedGauge {
	var (
		gauge = r.newMetric(name, tags, gaugeType)
		size  = r.calculateSize(gauge)
	)

	return cachedMetric{
		metric:   gauge,
		reporter: r,
		size:     size,
	}
}

// AllocateTimer implements tally.CachedStatsReporter.
func (r *reporter) AllocateTimer(
	name string,
	tags map[string]string,
) tally.CachedTimer {
	var (
		timer = r.newMetric(name, tags, timerType)
		size  = r.calculateSize(timer)
	)

	return cachedMetric{
		metric:   timer,
		reporter: r,
		size:     size,
	}
}

// AllocateHistogram implements tally.CachedStatsReporter.
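//
// Each bucket is allocated as its own counter; at report time the reporter
// attaches two extra tags identifying the bucket. A sketch of the resulting
// tag values, assuming the default tag names and precision (the bucket
// index and bounds below are illustrative):
//
//	bucketid: "0003"              // zero-padded bucket index, at least 4 digits
//	bucket:   "4.000000-8.000000" // "<lower>-<upper>" bounds formatted via %.6f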
func (r *reporter) AllocateHistogram(
	name string,
	tags map[string]string,
	buckets tally.Buckets,
) tally.CachedHistogram {
	var (
		_, isDuration = buckets.(tally.DurationBuckets)
		bucketIDLen   = int(math.Max(
			float64(ndigits(buckets.Len())),
			float64(_minMetricBucketIDTagLength),
		))
		bucketIDFmt           = "%0" + strconv.Itoa(bucketIDLen) + "d"
		cachedValueBuckets    []cachedHistogramBucket
		cachedDurationBuckets []cachedHistogramBucket
	)

	var (
		mtags        = r.convertTags(tags)
		prevDuration = time.Duration(math.MinInt64)
		prevValue    = -math.MaxFloat64
	)
	for i, pair := range tally.BucketPairs(buckets) {
		var (
			counter = r.allocateCounter(name, nil)
			hbucket = cachedHistogramBucket{
				bucketID:           r.stringInterner.Intern(fmt.Sprintf(bucketIDFmt, i)),
				valueUpperBound:    pair.UpperBoundValue(),
				durationUpperBound: pair.UpperBoundDuration(),
				metric:             &counter,
			}
			delta = len(r.bucketIDTagName) + len(r.bucketTagName) + len(hbucket.bucketID)
		)

		hbucket.metric.metric.Tags = mtags
		hbucket.metric.size = r.calculateSize(hbucket.metric.metric)

		if isDuration {
			bname := r.stringInterner.Intern(
				r.durationBucketString(prevDuration) + "-" +
					r.durationBucketString(pair.UpperBoundDuration()),
			)
			hbucket.bucket = bname
			hbucket.metric.size += int32(delta + len(bname))
			cachedDurationBuckets = append(cachedDurationBuckets, hbucket)
		} else {
			bname := r.stringInterner.Intern(
				r.valueBucketString(prevValue) + "-" +
					r.valueBucketString(pair.UpperBoundValue()),
			)
			hbucket.bucket = bname
			hbucket.metric.size += int32(delta + len(bname))
			cachedValueBuckets = append(cachedValueBuckets, hbucket)
		}

		prevDuration = pair.UpperBoundDuration()
		prevValue = pair.UpperBoundValue()
	}

	return cachedHistogram{
		r:                     r,
		name:                  name,
		cachedValueBuckets:    cachedValueBuckets,
		cachedDurationBuckets: cachedDurationBuckets,
	}
}

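// valueBucketString formats a value bucket bound, mapping the float64
// extremes to "infinity" and "-infinity".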
func (r *reporter) valueBucketString(v float64) string {
	if v == math.MaxFloat64 {
		return "infinity"
	}
	if v == -math.MaxFloat64 {
		return "-infinity"
	}
	return fmt.Sprintf(r.bucketValFmt, v)
}

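// durationBucketString formats a duration bucket bound, mapping the
// time.Duration extremes to "infinity" and "-infinity".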
func (r *reporter) durationBucketString(d time.Duration) string {
	if d == 0 {
		return "0"
	}
	if d == time.Duration(math.MaxInt64) {
		return "infinity"
	}
	if d == time.Duration(math.MinInt64) {
		return "-infinity"
	}
	return d.String()
}

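// newMetric constructs a thrift metric with an interned name and sentinel
// max values, so that calculateSize measures the worst-case encoding.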
func (r *reporter) newMetric(
	name string,
	tags map[string]string,
	t metricType,
) m3thrift.Metric {
	m := m3thrift.Metric{
		Name:      r.stringInterner.Intern(name),
		Timestamp: _maxInt64,
	}

	switch t {
	case counterType:
		m.Value.MetricType = m3thrift.MetricType_COUNTER
		m.Value.Count = _maxInt64
	case gaugeType:
		m.Value.MetricType = m3thrift.MetricType_GAUGE
		m.Value.Gauge = _maxFloat64
	case timerType:
		m.Value.MetricType = m3thrift.MetricType_TIMER
		m.Value.Timer = _maxInt64
	}

	if len(tags) == 0 {
		return m
	}

	m.Tags = r.convertTags(tags)
	return m
}

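// calculateSize returns the serialized size of m in bytes by writing it
// to the shared counting transport under calcLock.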
func (r *reporter) calculateSize(m m3thrift.Metric) int32 {
	r.calcLock.Lock()
	m.Write(r.calcProto) //nolint:errcheck
	size := r.calc.GetCount()
	r.calc.ResetCount()
	r.calcLock.Unlock()
	return size
}

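// reportCopyMetric stamps a copy of the metric with the reporter's coarse
// timestamp and enqueues it, dropping it if the reporter is closed.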
func (r *reporter) reportCopyMetric(
	m m3thrift.Metric,
	size int32,
	bucket string,
	bucketID string,
) {
	r.pending.Inc()
	defer r.pending.Dec()

	if r.done.Load() {
		return
	}

	m.Timestamp = r.now.Load()

	sm := sizedMetric{
		m:        m,
		size:     size,
		set:      true,
		bucket:   bucket,
		bucketID: bucketID,
	}

	select {
	case r.metCh <- sm:
	case <-r.donech:
	}
}

// Flush sends an empty sizedMetric to signal a flush.
func (r *reporter) Flush() {
	r.pending.Inc()
	defer r.pending.Dec()

	if r.done.Load() {
		return
	}

	r.reportInternalMetrics()
	r.metCh <- sizedMetric{}
}

// Close waits for metrics to be flushed before closing the backend.
func (r *reporter) Close() (err error) {
	if !r.done.CAS(false, true) {
		return errAlreadyClosed
	}

	// Wait for any pending reports to complete.
	for r.pending.Load() > 0 {
		runtime.Gosched()
	}

	close(r.donech)
	close(r.metCh)
	r.wg.Wait()

	return nil
}

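// Capabilities returns the reporter's capabilities; the reporter itself
// satisfies tally.Capabilities via Reporting and Tagging below.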
func (r *reporter) Capabilities() tally.Capabilities {
	return r
}

func (r *reporter) Reporting() bool {
	return true
}

func (r *reporter) Tagging() bool {
	return true
}

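// process drains the metric queue, accumulating metrics until either a
// flush signal (an unset sizedMetric) arrives or the next metric would
// exceed the packet's free-byte budget, at which point the batch is
// flushed. Histogram bucket tags are attached here from a pooled slice.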
func (r *reporter) process() {
	var (
		extraTags = sync.Pool{
			New: func() interface{} {
				return make([]m3thrift.MetricTag, 0, 8)
			},
		}
		borrowedTags = make([][]m3thrift.MetricTag, 0, 128)
		mets         = make([]m3thrift.Metric, 0, r.freeBytes/10)
		bytes        int32
	)

	for smet := range r.metCh {
		flush := !smet.set && len(mets) > 0
		if flush || bytes+smet.size > r.freeBytes {
			r.numMetrics.Add(int64(len(mets)))
			mets = r.flush(mets)
			bytes = 0

			if len(borrowedTags) > 0 {
				for i := range borrowedTags {
					extraTags.Put(borrowedTags[i][:0])
				}
				borrowedTags = borrowedTags[:0]
			}
		}

		if !smet.set {
			continue
		}

		m := smet.m
		if len(smet.bucket) > 0 {
			tags := extraTags.Get().([]m3thrift.MetricTag)
			tags = append(tags, m.Tags...)
			tags = append(
				tags,
				m3thrift.MetricTag{
					Name:  r.bucketIDTagName,
					Value: smet.bucketID,
				},
				m3thrift.MetricTag{
					Name:  r.bucketTagName,
					Value: smet.bucket,
				},
			)
			borrowedTags = append(borrowedTags, tags)
			m.Tags = tags
		}

		mets = append(mets, m)
		bytes += smet.size
	}

	// Final flush
	r.flush(mets)
}

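// flush emits the accumulated metrics as a single batch and returns the
// slice truncated to length zero for reuse.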
func (r *reporter) flush(mets []m3thrift.Metric) []m3thrift.Metric {
	if len(mets) == 0 {
		return mets
	}

	r.numBatches.Inc()

	err := r.client.EmitMetricBatchV2(m3thrift.MetricBatch{
		Metrics:    mets,
		CommonTags: r.commonTags,
	})
	if err != nil {
		r.numWriteErrors.Inc()
	}

	// n.b. In the event that we had allocated additional tag storage in
	//      process(), clear it so that it can be reclaimed. This does not
	//      affect allocated metrics' tags.
	for i := range mets {
		mets[i].Tags = nil
	}
	return mets[:0]
}

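// convertTags converts a tag map into a slice of interned thrift tags,
// memoizing the result in the tag cache.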
func (r *reporter) convertTags(tags map[string]string) []m3thrift.MetricTag {
	key := cache.TagMapKey(tags)

	mtags, ok := r.tagCache.Get(key)
	if !ok {
		mtags = r.resourcePool.getMetricTagSlice()
		for k, v := range tags {
			mtags = append(mtags, m3thrift.MetricTag{
				Name:  r.stringInterner.Intern(k),
				Value: r.stringInterner.Intern(v),
			})
		}
		mtags = r.tagCache.Set(key, mtags)
	}

	return mtags
}

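// reportInternalMetrics reports the reporter's own counters and records
// the average batch size into the value-bucketed histogram. For example,
// 100 metrics over 8 batches yields a batch size of 12.5, which falls in
// the (8, 16] bucket, so 16 is reported. If no batches were flushed, the
// average is NaN and the overflow bucket is reported.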
func (r *reporter) reportInternalMetrics() {
	var (
		batches     = r.numBatches.Swap(0)
		metrics     = r.numMetrics.Swap(0)
		writeErrors = r.numWriteErrors.Swap(0)
		batchSize   = float64(metrics) / float64(batches)
	)

	bucket := sort.Search(len(r.buckets), func(i int) bool {
		return r.buckets[i].UpperBoundValue() >= batchSize
	})

	var value float64
	if bucket < len(r.buckets) {
		value = r.buckets[bucket].UpperBoundValue()
	} else {
		value = math.MaxFloat64
	}

	r.batchSizeHistogram.ValueBucket(0, value).ReportSamples(1)
	r.numBatchesCounter.ReportCount(batches)
	r.numMetricsCounter.ReportCount(metrics)
	r.numWriteErrorsCounter.ReportCount(writeErrors)
	r.numTagCacheCounter.ReportCount(int64(r.tagCache.Len()))
}

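// timeLoop refreshes the reporter's coarse timestamp every _timeResolution
// (100ms) until the reporter is closed; reported metrics read it instead
// of calling time.Now for every sample.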
func (r *reporter) timeLoop() {
	t := time.NewTicker(_timeResolution)
	defer t.Stop()
	for !r.done.Load() {
		r.now.Store(time.Now().UnixNano())
		select {
		case <-t.C:
		case <-r.donech:
			return
		}
	}
}

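// cachedMetric is a pre-allocated metric with a pre-computed serialized
// size, bound to the reporter that created it.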
type cachedMetric struct {
	metric   m3thrift.Metric
	reporter *reporter
	size     int32
}

func (c cachedMetric) ReportCount(value int64) {
	c.metric.Value.Count = value
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}

func (c cachedMetric) ReportGauge(value float64) {
	c.metric.Value.Gauge = value
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}

func (c cachedMetric) ReportTimer(interval time.Duration) {
	c.metric.Value.Timer = int64(interval)
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}

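// noopMetric discards all reported values; it is returned when a histogram
// bucket lookup does not match any cached bucket.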
type noopMetric struct{}

func (c noopMetric) ReportCount(value int64)            {}
func (c noopMetric) ReportGauge(value float64)          {}
func (c noopMetric) ReportTimer(interval time.Duration) {}
func (c noopMetric) ReportSamples(value int64)          {}

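// cachedHistogram holds the pre-allocated per-bucket counters for a
// histogram, sorted by upper bound for binary search at report time.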
type cachedHistogram struct {
	r                     *reporter
	name                  string
	cachedValueBuckets    []cachedHistogramBucket
	cachedDurationBuckets []cachedHistogramBucket
}

func (h cachedHistogram) ValueBucket(
	_ float64,
	bucketUpperBound float64,
) tally.CachedHistogramBucket {
	var (
		n   = len(h.cachedValueBuckets)
		idx = sort.Search(n, func(i int) bool {
			return h.cachedValueBuckets[i].valueUpperBound >= bucketUpperBound
		})
	)

	if idx == n {
		return noopMetric{}
	}

	var (
		b        = h.cachedValueBuckets[idx]
		cm       = b.metric
		m        = cm.metric
		size     = cm.size
		bucket   = b.bucket
		bucketID = b.bucketID
		rep      = cm.reporter
	)

	return reportSamplesFunc(func(value int64) {
		m.Value.Count = value
		rep.reportCopyMetric(m, size, bucket, bucketID)
	})
}

func (h cachedHistogram) DurationBucket(
	_ time.Duration,
	bucketUpperBound time.Duration,
) tally.CachedHistogramBucket {
	var (
		n   = len(h.cachedDurationBuckets)
		idx = sort.Search(n, func(i int) bool {
			return h.cachedDurationBuckets[i].durationUpperBound >= bucketUpperBound
		})
	)

	if idx == n {
		return noopMetric{}
	}

	var (
		b        = h.cachedDurationBuckets[idx]
		cm       = b.metric
		m        = cm.metric
		size     = cm.size
		bucket   = b.bucket
		bucketID = b.bucketID
		rep      = cm.reporter
	)

	return reportSamplesFunc(func(value int64) {
		m.Value.Count = value
		rep.reportCopyMetric(m, size, bucket, bucketID)
	})
}

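// cachedHistogramBucket pairs a bucket's counter with its bounds and its
// pre-interned bucket and bucket-ID tag values.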
type cachedHistogramBucket struct {
	metric             *cachedMetric
	durationUpperBound time.Duration
	valueUpperBound    float64
	bucket             string
	bucketID           string
}

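// reportSamplesFunc adapts a function to tally.CachedHistogramBucket.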
type reportSamplesFunc func(value int64)

func (f reportSamplesFunc) ReportSamples(value int64) {
	f(value)
}

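// sizedMetric pairs a metric with its pre-computed serialized size; the
// zero value (set == false) acts as a flush signal in process().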
type sizedMetric struct {
	m        m3thrift.Metric
	size     int32
	set      bool
	bucket   string
	bucketID string
}

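// ndigits returns the number of decimal digits in i.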
func ndigits(i int) int {
	n := 1
	for i/10 != 0 {
		n++
		i /= 10
	}
	return n
}