github.com/m3db/m3@v1.5.0/src/cmd/services/m3coordinator/downsample/metrics_appender.go

     1  // Copyright (c) 2018 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package downsample
    22  
    23  import (
    24  	"bytes"
    25  	"encoding/json"
    26  	"errors"
    27  	"fmt"
    28  	"sort"
    29  	"time"
    30  
    31  	"github.com/m3db/m3/src/aggregator/aggregator"
    32  	"github.com/m3db/m3/src/aggregator/client"
    33  	"github.com/m3db/m3/src/metrics/aggregation"
    34  	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
    35  	"github.com/m3db/m3/src/metrics/matcher"
    36  	"github.com/m3db/m3/src/metrics/metadata"
    37  	"github.com/m3db/m3/src/metrics/metric"
    38  	"github.com/m3db/m3/src/metrics/metric/id"
    39  	"github.com/m3db/m3/src/metrics/policy"
    40  	"github.com/m3db/m3/src/metrics/rules"
    41  	"github.com/m3db/m3/src/query/graphite/graphite"
    42  	"github.com/m3db/m3/src/query/models"
    43  	"github.com/m3db/m3/src/query/ts"
    44  	"github.com/m3db/m3/src/x/clock"
    45  	"github.com/m3db/m3/src/x/pool"
    46  	"github.com/m3db/m3/src/x/serialize"
    47  
    48  	"github.com/gogo/protobuf/jsonpb"
    49  	"github.com/uber-go/tally"
    50  	"go.uber.org/zap"
    51  	"go.uber.org/zap/zapcore"
    52  )
    53  
    54  var errNoTags = errors.New("no tags provided")
    55  
    56  type metricsAppenderPool struct {
    57  	pool pool.ObjectPool
    58  }
    59  
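        // newMetricsAppenderPool builds an object pool of metricsAppenders that
        // share the given tag serialization limits and metric name tag.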
    60  func newMetricsAppenderPool(
    61  	opts pool.ObjectPoolOptions,
    62  	tagLimits serialize.TagSerializationLimits,
    63  	nameTag []byte) *metricsAppenderPool {
    64  	p := &metricsAppenderPool{
    65  		pool: pool.NewObjectPool(opts),
    66  	}
    67  	p.pool.Init(func() interface{} {
    68  		return newMetricsAppender(p, tagLimits, nameTag)
    69  	})
    70  	return p
    71  }
    72  
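        // Get returns an appender from the pool with its per-metric state reset.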
    73  func (p *metricsAppenderPool) Get() *metricsAppender {
    74  	appender := p.pool.Get().(*metricsAppender)
    75  	// NB: reset appender.
    76  	appender.NextMetric()
    77  	return appender
    78  }
    79  
    80  func (p *metricsAppenderPool) Put(v *metricsAppender) {
    81  	p.pool.Put(v)
    82  }
    83  
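        // metricsAppenderMetrics holds the counters shared with every samples
        // appender built by a metricsAppender.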
    84  type metricsAppenderMetrics struct {
    85  	processedCountNonRollup tally.Counter
    86  	processedCountRollup    tally.Counter
    87  	operationsCount         tally.Counter
    88  }
    89  
    90  // A metricsAppender is not thread-safe.
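        // A rough usage sketch (illustrative only; in practice the downsampler
        // obtains the appender from the pool and calls reset with options first):
        //
        //	appender.AddTag([]byte("__name__"), []byte("requests"))
        //	res, err := appender.SamplesAppender(SampleAppenderOptions{})
        //	if err == nil {
        //		// write samples via res.SamplesAppender ...
        //	}
        //	appender.NextMetric() // reset between metrics
        //	appender.Finalize()   // return to the pool when done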
    91  type metricsAppender struct {
    92  	metricsAppenderOptions
    93  
    94  	pool *metricsAppenderPool
    95  
    96  	multiSamplesAppender         *multiSamplesAppender
    97  	curr                         metadata.StagedMetadata
    98  	defaultStagedMetadatasCopies []metadata.StagedMetadatas
    99  	mappingRuleStoragePolicies   []policy.StoragePolicy
   100  
   101  	cachedEncoders []serialize.TagEncoder
   102  	inuseEncoders  []serialize.TagEncoder
   103  
   104  	originalTags *tags
   105  	cachedTags   []*tags
   106  	inuseTags    []*tags
   107  	tagIter      serialize.MetricTagsIterator
   108  	tagIterFn    id.SortedTagIteratorFn
   109  	nameTagFn    id.NameAndTagsFn
   110  }
   111  
   112  // metricsAppenderOptions will have one of agg or clientRemote set.
   113  type metricsAppenderOptions struct {
   114  	agg          aggregator.Aggregator
   115  	clientRemote client.Client
   116  
   117  	defaultStagedMetadatasProtos []metricpb.StagedMetadatas
   118  	matcher                      matcher.Matcher
   119  	tagEncoderPool               serialize.TagEncoderPool
   120  	untimedRollups               bool
   121  
   122  	clockOpts    clock.Options
   123  	debugLogging bool
   124  	logger       *zap.Logger
   125  	metrics      metricsAppenderMetrics
   126  }
   127  
   128  func newMetricsAppender(
   129  	pool *metricsAppenderPool,
   130  	tagLimits serialize.TagSerializationLimits,
   131  	nameTag []byte) *metricsAppender {
   132  	// N.B - a metrics appender is not thread-safe, so it's fine to use an unchecked tag iterator. This significantly
   133  	// speeds up matcher performance.
   134  	tagIter := serialize.NewUncheckedMetricTagsIterator(tagLimits)
   135  	return &metricsAppender{
   136  		pool:                 pool,
   137  		multiSamplesAppender: newMultiSamplesAppender(),
   138  		tagIter:              tagIter,
   139  		tagIterFn: func(tagPairs []byte) id.SortedTagIterator {
   140  			// Use the same tagIter for all matching computations. It is safe to reuse since a metric appender is
   141  			// single threaded and it's reset before each computation.
   142  			tagIter.Reset(tagPairs)
   143  			return tagIter
   144  		},
   145  		nameTagFn: func(id []byte) ([]byte, []byte, error) {
   146  			name, err := resolveEncodedTagsNameTag(id, nameTag)
   147  			if err != nil && !errors.Is(err, errNoMetricNameTag) {
   148  				return nil, nil, err
   149  			}
   150  			// ID is always the encoded tags for IDs in the downsampler
   151  			tags := id
   152  			return name, tags, nil
   153  		},
   154  	}
   155  }
   156  
   157  // reset is called when pulled from the pool.
   158  func (a *metricsAppender) reset(opts metricsAppenderOptions) {
   159  	a.metricsAppenderOptions = opts
   160  
   161  	// Copy over any previous inuse encoders to the cached encoders list.
   162  	a.resetEncoders()
   163  
   164  	// Make sure a.defaultStagedMetadatasCopies is the right length.
   165  	capRequired := len(opts.defaultStagedMetadatasProtos)
   166  	if cap(a.defaultStagedMetadatasCopies) < capRequired {
   167  		// Too short, reallocate.
   168  		slice := make([]metadata.StagedMetadatas, capRequired)
   169  		a.defaultStagedMetadatasCopies = slice
   170  	} else {
   171  		// Has enough capacity, take subslice.
   172  		slice := a.defaultStagedMetadatasCopies[:capRequired]
   173  		a.defaultStagedMetadatasCopies = slice
   174  	}
   175  }
   176  
   177  func (a *metricsAppender) AddTag(name, value []byte) {
   178  	if a.originalTags == nil {
   179  		a.originalTags = a.tags()
   180  	}
   181  	a.originalTags.append(name, value)
   182  }
   183  
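        // SamplesAppender matches the metric described by the accumulated tags
        // against override, mapping, default (auto-mapping) and rollup rules, and
        // returns the samples appenders to write to along with whether a drop
        // policy was applied and whether timestamps should be dropped.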
   184  func (a *metricsAppender) SamplesAppender(opts SampleAppenderOptions) (SamplesAppenderResult, error) {
   185  	if a.originalTags == nil {
   186  		return SamplesAppenderResult{}, errNoTags
   187  	}
   188  	tags := a.originalTags
   189  
   190  	// NB (@shreyas): Add the metric type tag. The tag has the prefix
   191  	// __m3_. All tags with that prefix are used only for filter matching
   192  	// and are stripped off before we actually send to the aggregator.
   193  	switch opts.SeriesAttributes.M3Type {
   194  	case ts.M3MetricTypeCounter:
   195  		tags.append(metric.M3TypeTag, metric.M3CounterValue)
   196  	case ts.M3MetricTypeGauge:
   197  		tags.append(metric.M3TypeTag, metric.M3GaugeValue)
   198  	case ts.M3MetricTypeTimer:
   199  		tags.append(metric.M3TypeTag, metric.M3TimerValue)
   200  	}
   201  	switch opts.SeriesAttributes.PromType {
   202  	case ts.PromMetricTypeUnknown:
   203  		tags.append(metric.M3PromTypeTag, metric.PromUnknownValue)
   204  	case ts.PromMetricTypeCounter:
   205  		tags.append(metric.M3PromTypeTag, metric.PromCounterValue)
   206  	case ts.PromMetricTypeGauge:
   207  		tags.append(metric.M3PromTypeTag, metric.PromGaugeValue)
   208  	case ts.PromMetricTypeHistogram:
   209  		tags.append(metric.M3PromTypeTag, metric.PromHistogramValue)
   210  	case ts.PromMetricTypeGaugeHistogram:
   211  		tags.append(metric.M3PromTypeTag, metric.PromGaugeHistogramValue)
   212  	case ts.PromMetricTypeSummary:
   213  		tags.append(metric.M3PromTypeTag, metric.PromSummaryValue)
   214  	case ts.PromMetricTypeInfo:
   215  		tags.append(metric.M3PromTypeTag, metric.PromInfoValue)
   216  	case ts.PromMetricTypeStateSet:
   217  		tags.append(metric.M3PromTypeTag, metric.PromStateSetValue)
   218  	}
   219  
   220  	// Sort tags
   221  	sort.Sort(tags)
   222  
   223  	// Encode tags and compute a temporary (unowned) ID
   224  	tagEncoder := a.tagEncoder()
   225  	if err := tagEncoder.Encode(tags); err != nil {
   226  		return SamplesAppenderResult{}, err
   227  	}
   228  	data, ok := tagEncoder.Data()
   229  	if !ok {
   230  		return SamplesAppenderResult{}, fmt.Errorf("unable to encode tags: names=%v, values=%v",
   231  			tags.names, tags.values)
   232  	}
   233  
   234  	a.multiSamplesAppender.reset()
   235  	unownedID := data.Bytes()
   236  	// Match policies and rollups and build samples appender
   237  	a.tagIter.Reset(unownedID)
   238  	now := time.Now()
   239  	nowNanos := now.UnixNano()
   240  	fromNanos := nowNanos
   241  	toNanos := nowNanos + 1
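        	// Rules are matched over a one nanosecond window [fromNanos, toNanos)
        	// so only the rules active right now apply.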
   242  	// N.B - it's safe to reuse the shared tag iterator because the matcher uses it immediately to resolve the optional
   243  	// namespace tag to determine the ruleset for the namespace. Then the ruleset matcher reuses the tag iterator for
   244  	// every match computation.
   245  	matchResult, err := a.matcher.ForwardMatch(a.tagIter, fromNanos, toNanos, rules.MatchOptions{
   246  		NameAndTagsFn:       a.nameTagFn,
   247  		SortedTagIteratorFn: a.tagIterFn,
   248  	})
   249  	if err != nil {
   250  		return SamplesAppenderResult{}, err
   251  	}
   252  
   253  	// Filter out augmented metrics tags we added for matching.
   254  	for _, filter := range defaultFilterOutTagPrefixes {
   255  		tags.filterPrefix(filter)
   256  	}
   257  
   258  	// Reuse a slice to keep the current staged metadatas we will apply.
   259  	a.curr.Pipelines = a.curr.Pipelines[:0]
   260  
   261  	// First, process any override explicitly provided as part of request
   262  	// (via request headers that specify target namespaces).
   263  	if opts.Override {
   264  		for _, rule := range opts.OverrideRules.MappingRules {
   265  			stagedMetadatas, err := rule.StagedMetadatas()
   266  			if err != nil {
   267  				return SamplesAppenderResult{}, err
   268  			}
   269  
   270  			a.debugLogMatch("downsampler applying override mapping rule",
   271  				debugLogMatchOptions{Meta: stagedMetadatas})
   272  
   273  			pipelines := stagedMetadatas[len(stagedMetadatas)-1]
   274  			a.curr.Pipelines =
   275  				append(a.curr.Pipelines, pipelines.Pipelines...)
   276  		}
   277  
   278  		if err := a.addSamplesAppenders(tags, a.curr); err != nil {
   279  			return SamplesAppenderResult{}, err
   280  		}
   281  
   282  		return SamplesAppenderResult{
   283  			SamplesAppender:     a.multiSamplesAppender,
   284  			IsDropPolicyApplied: false,
   285  			ShouldDropTimestamp: false,
   286  		}, nil
   287  	}
   288  
   289  	// Next, apply any mapping rules that match. We track which storage policies have been applied based on the
   290  	// mapping rules that match. Any storage policies that have been applied will be skipped when applying
   291  	// the auto-mapping rules to avoid redundant writes (i.e. overwriting each other).
   292  	var (
   293  		ruleStagedMetadatas = matchResult.ForExistingIDAt(nowNanos)
   294  		dropApplyResult     metadata.ApplyOrRemoveDropPoliciesResult
   295  		dropTimestamp       bool
   296  	)
   297  	a.mappingRuleStoragePolicies = a.mappingRuleStoragePolicies[:0]
   298  	if !ruleStagedMetadatas.IsDefault() && len(ruleStagedMetadatas) != 0 {
   299  		a.debugLogMatch("downsampler applying matched rule",
   300  			debugLogMatchOptions{Meta: ruleStagedMetadatas})
   301  
   302  		// Collect storage policies for all the current active mapping rules.
   303  		// TODO: we should convert this to iterate over pointers
   304  		// nolint:gocritic
   305  		for _, stagedMetadata := range ruleStagedMetadatas {
   306  			for _, pipe := range stagedMetadata.Pipelines {
   307  				// Skip rollup rules unless configured otherwise.
   308  				// We only want to consider mapping rules here,
   309  				// as we still want to apply default mapping rules to
   310  				// metrics that are rolled up to ensure the underlying metric
   311  				// gets written to aggregated namespaces.
   312  				if pipe.IsMappingRule() {
   313  					for _, sp := range pipe.StoragePolicies {
   314  						a.mappingRuleStoragePolicies =
   315  							append(a.mappingRuleStoragePolicies, sp)
   316  					}
   317  				} else {
   318  					a.debugLogMatch(
   319  						"skipping rollup rule in populating active mapping rule policies",
   320  						debugLogMatchOptions{},
   321  					)
   322  				}
   323  			}
   324  		}
   325  
   326  		// Only sample if going to actually aggregate
   327  		pipelines := ruleStagedMetadatas[len(ruleStagedMetadatas)-1]
   328  		a.curr.Pipelines = append(a.curr.Pipelines, pipelines.Pipelines...)
   329  	}
   330  
   331  	// Next, we cover auto-mapping (otherwise referred to as default) rules.
   332  	// We always aggregate any default rules with a few exceptions:
   333  	// 1. A mapping rule has provided an override for a storage policy,
   334  	//    if so then skip aggregating for that storage policy.
   335  	//    This is what we calculated in the step above.
   336  	// 2. Any type of drop rule has been set. Drop rules should mean that the auto-mapping rules are ignored.
   337  	if !a.curr.Pipelines.IsDropPolicySet() {
   338  		// No drop rule has been set as part of rule matching.
   339  		for idx, stagedMetadatasProto := range a.defaultStagedMetadatasProtos {
   340  			// NB(r): Need to take a copy of the default staged metadatas as we
   341  			// sometimes mutate them.
   342  			stagedMetadatas := a.defaultStagedMetadatasCopies[idx]
   343  			err := stagedMetadatas.FromProto(stagedMetadatasProto)
   344  			if err != nil {
   345  				return SamplesAppenderResult{},
   346  					fmt.Errorf("unable to copy default staged metadatas: %v", err)
   347  			}
   348  
   349  			// Save the staged metadatas back to the idx so all slices can be reused.
   350  			a.defaultStagedMetadatasCopies[idx] = stagedMetadatas
   351  
   352  			stagedMetadataBeforeFilter := stagedMetadatas[:]
   353  			if len(a.mappingRuleStoragePolicies) != 0 {
   354  				// If mapping rules have applied aggregations for
   355  				// storage policies then de-dupe so we don't have two
   356  				// active aggregations for the same storage policy.
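        				// The de-duping below filters in place: each slice is resliced to
        				// zero length and only surviving entries are re-appended.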
   357  				stagedMetadatasAfterFilter := stagedMetadatas[:0]
   358  				for _, stagedMetadata := range stagedMetadatas {
   359  					pipesAfterFilter := stagedMetadata.Pipelines[:0]
   360  					for _, pipe := range stagedMetadata.Pipelines {
   361  						storagePoliciesAfterFilter := pipe.StoragePolicies[:0]
   362  						for _, sp := range pipe.StoragePolicies {
   363  							// Check aggregation for storage policy not already
   364  							// set by a mapping rule.
   365  							matchedByMappingRule := false
   366  							for _, existing := range a.mappingRuleStoragePolicies {
   367  								if sp.Equivalent(existing) {
   368  									matchedByMappingRule = true
   369  									a.debugLogMatch("downsampler skipping default mapping rule storage policy",
   370  										debugLogMatchOptions{Meta: stagedMetadataBeforeFilter})
   371  									break
   372  								}
   373  							}
   374  							if !matchedByMappingRule {
   375  								// Keep storage policy if not matched by mapping rule.
   376  								storagePoliciesAfterFilter =
   377  									append(storagePoliciesAfterFilter, sp)
   378  							}
   379  						}
   380  
   381  						// Update storage policies slice after filtering.
   382  						pipe.StoragePolicies = storagePoliciesAfterFilter
   383  
   384  						if len(pipe.StoragePolicies) != 0 {
   385  							// Keep the pipeline if it still has some storage policies.
   386  							pipesAfterFilter = append(pipesAfterFilter, pipe)
   387  						}
   388  					}
   389  
   390  					// Update pipelines after filtering.
   391  					stagedMetadata.Pipelines = pipesAfterFilter
   392  
   393  					if len(stagedMetadata.Pipelines) != 0 {
   394  						// Keep the staged metadata if it still has some pipelines.
   395  						stagedMetadatasAfterFilter =
   396  							append(stagedMetadatasAfterFilter, stagedMetadata)
   397  					}
   398  				}
   399  
   400  				// Finally set the staged metadatas we're keeping
   401  				// as those that were kept after filtering.
   402  				stagedMetadatas = stagedMetadatasAfterFilter
   403  			}
   404  
   405  			// Skip appending entirely if no staged metadatas remain
   406  			// after the filtering above.
   407  			if len(stagedMetadatas) == 0 {
   408  				a.debugLogMatch("downsampler skipping default mapping rule completely",
   409  					debugLogMatchOptions{Meta: stagedMetadataBeforeFilter})
   410  				continue
   411  			}
   412  
   413  			a.debugLogMatch("downsampler applying default mapping rule",
   414  				debugLogMatchOptions{Meta: stagedMetadatas})
   415  
   416  			pipelines := stagedMetadatas[len(stagedMetadatas)-1]
   417  			a.curr.Pipelines = append(a.curr.Pipelines, pipelines.Pipelines...)
   418  		}
   419  	}
   420  
   421  	// Compute the timestamp-drop decision first so that it still applies
   422  	// even if mapping rules drop the metric.
   423  	dropTimestamp = a.curr.Pipelines.ShouldDropTimestamp(
   424  		metadata.ShouldDropTimestampOptions{
   425  			UntimedRollups: a.untimedRollups,
   426  		})
   427  
   428  	// Apply drop policies results
   429  	a.curr.Pipelines, dropApplyResult = a.curr.Pipelines.ApplyOrRemoveDropPolicies()
   430  
   431  	// Now send the results of mapping / auto-mapping rules to the relevant downsampler.
   432  	// We explicitly skip sending if there's no work to be done: specifically
   433  	// if there's a drop policy or if the staged metadata is a no-op.
   434  	if len(a.curr.Pipelines) > 0 && !a.curr.IsDropPolicyApplied() && !a.curr.IsDefault() {
   435  		// Send to downsampler if we have something in the pipeline.
   436  		a.debugLogMatch("downsampler using built mapping staged metadatas",
   437  			debugLogMatchOptions{Meta: []metadata.StagedMetadata{a.curr}})
   438  
   439  		if err := a.addSamplesAppenders(tags, a.curr); err != nil {
   440  			return SamplesAppenderResult{}, err
   441  		}
   442  	}
   443  
   444  	// Finally, process and deliver staged metadata resulting from rollup rules.
   445  	numRollups := matchResult.NumNewRollupIDs()
   446  	for i := 0; i < numRollups; i++ {
   447  		rollup := matchResult.ForNewRollupIDsAt(i, nowNanos)
   448  
   449  		a.debugLogMatch("downsampler applying matched rollup rule",
   450  			debugLogMatchOptions{Meta: rollup.Metadatas, RollupID: rollup.ID})
   451  		a.multiSamplesAppender.addSamplesAppender(samplesAppender{
   452  			agg:             a.agg,
   453  			clientRemote:    a.clientRemote,
   454  			unownedID:       rollup.ID,
   455  			stagedMetadatas: rollup.Metadatas,
   456  
   457  			processedCountNonRollup: a.metrics.processedCountNonRollup,
   458  			processedCountRollup:    a.metrics.processedCountRollup,
   459  			operationsCount:         a.metrics.operationsCount,
   460  		})
   461  		if a.untimedRollups {
   462  			dropTimestamp = true
   463  		}
   464  	}
   465  	dropPolicyApplied := dropApplyResult != metadata.NoDropPolicyPresentResult
   466  	return SamplesAppenderResult{
   467  		SamplesAppender:     a.multiSamplesAppender,
   468  		IsDropPolicyApplied: dropPolicyApplied,
   469  		ShouldDropTimestamp: dropTimestamp,
   470  	}, nil
   471  }
   472  
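        // debugLogMatchOptions carries the optional context (staged metadatas,
        // storage policy, rollup ID) attached to a debug log line.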
   473  type debugLogMatchOptions struct {
   474  	Meta          metadata.StagedMetadatas
   475  	StoragePolicy policy.StoragePolicy
   476  	RollupID      []byte
   477  }
   478  
   479  func (a *metricsAppender) debugLogMatch(str string, opts debugLogMatchOptions) {
   480  	if !a.debugLogging {
   481  		return
   482  	}
   483  	fields := []zapcore.Field{
   484  		zap.String("tags", a.originalTags.String()),
   485  	}
   486  	if v := opts.RollupID; v != nil {
   487  		fields = append(fields, zap.ByteString("rollupID", v))
   488  	}
   489  	if v := opts.Meta; v != nil {
   490  		fields = append(fields, stagedMetadatasLogField(v))
   491  	}
   492  	if v := opts.StoragePolicy; v != policy.EmptyStoragePolicy {
   493  		fields = append(fields, zap.Stringer("storagePolicy", v))
   494  	}
   495  	a.logger.Debug(str, fields...)
   496  }
   497  
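        // NextMetric resets per-metric state so the appender can be reused for the
        // next metric.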
   498  func (a *metricsAppender) NextMetric() {
   499  	// Move the inuse encoders to cached as we should be done with using them.
   500  	a.resetEncoders()
   501  	a.resetTags()
   502  }
   503  
   504  func (a *metricsAppender) Finalize() {
   505  	// Return to pool.
   506  	a.pool.Put(a)
   507  }
   508  
   509  func (a *metricsAppender) tagEncoder() serialize.TagEncoder {
   510  	// Take an encoder from the cached encoder list; if none is present, get
   511  	// one from the pool. Add the returned encoder to the in-use list.
   512  	var tagEncoder serialize.TagEncoder
   513  	if len(a.cachedEncoders) == 0 {
   514  		tagEncoder = a.tagEncoderPool.Get()
   515  	} else {
   516  		l := len(a.cachedEncoders)
   517  		tagEncoder = a.cachedEncoders[l-1]
   518  		a.cachedEncoders = a.cachedEncoders[:l-1]
   519  	}
   520  	a.inuseEncoders = append(a.inuseEncoders, tagEncoder)
   521  	tagEncoder.Reset()
   522  	return tagEncoder
   523  }
   524  
   525  func (a *metricsAppender) tags() *tags {
   526  	// Take a tags struct from the cached list; if none is present, allocate
   527  	// a new one. Add the returned tags to the in-use list.
   528  	var t *tags
   529  	if len(a.cachedTags) == 0 {
   530  		t = newTags()
   531  	} else {
   532  		l := len(a.cachedTags)
   533  		t = a.cachedTags[l-1]
   534  		a.cachedTags = a.cachedTags[:l-1]
   535  	}
   536  	a.inuseTags = append(a.inuseTags, t)
   537  	t.names = t.names[:0]
   538  	t.values = t.values[:0]
   539  	t.reset()
   540  	return t
   541  }
   542  
   543  func (a *metricsAppender) resetEncoders() {
   544  	a.cachedEncoders = append(a.cachedEncoders, a.inuseEncoders...)
   545  	for i := range a.inuseEncoders {
   546  		a.inuseEncoders[i] = nil
   547  	}
   548  	a.inuseEncoders = a.inuseEncoders[:0]
   549  }
   550  
   551  func (a *metricsAppender) resetTags() {
   552  	a.cachedTags = append(a.cachedTags, a.inuseTags...)
   553  	for i := range a.inuseTags {
   554  		a.inuseTags[i] = nil
   555  	}
   556  	a.inuseTags = a.inuseTags[:0]
   557  	a.originalTags = nil
   558  }
   559  
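        // addSamplesAppenders builds one samples appender per pipeline that
        // augments tags (graphite prefixes or additional tags) and a single shared
        // appender for all remaining pipelines.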
   560  func (a *metricsAppender) addSamplesAppenders(originalTags *tags, stagedMetadata metadata.StagedMetadata) error {
   561  	var pipelines []metadata.PipelineMetadata
   562  	for _, pipeline := range stagedMetadata.Pipelines {
   563  		// For pipelines which have tags to augment we generate and send
   564  		// separate IDs. Other pipelines share the original ID.
   565  		pipeline := pipeline
   566  		if len(pipeline.Tags) == 0 && len(pipeline.GraphitePrefix) == 0 {
   567  			pipelines = append(pipelines, pipeline)
   568  			continue
   569  		}
   570  
   571  		tags := a.processTags(originalTags, pipeline.GraphitePrefix, pipeline.Tags, pipeline.AggregationID)
   572  
   573  		sm := stagedMetadata
   574  		sm.Pipelines = []metadata.PipelineMetadata{pipeline}
   575  
   576  		appender, err := a.newSamplesAppender(tags, sm)
   577  		if err != nil {
   578  			return err
   579  		}
   580  		a.multiSamplesAppender.addSamplesAppender(appender)
   581  	}
   582  
   583  	if len(pipelines) == 0 {
   584  		return nil
   585  	}
   586  
   587  	sm := stagedMetadata
   588  	sm.Pipelines = pipelines
   589  
   590  	appender, err := a.newSamplesAppender(originalTags, sm)
   591  	if err != nil {
   592  		return err
   593  	}
   594  	a.multiSamplesAppender.addSamplesAppender(appender)
   595  	return nil
   596  }
   597  
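        // newSamplesAppender encodes the given tags into an unowned ID and pairs it
        // with the staged metadata and the destination aggregator or remote client.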
   598  func (a *metricsAppender) newSamplesAppender(
   599  	tags *tags,
   600  	sm metadata.StagedMetadata,
   601  ) (samplesAppender, error) {
   602  	tagEncoder := a.tagEncoder()
   603  	if err := tagEncoder.Encode(tags); err != nil {
   604  		return samplesAppender{}, err
   605  	}
   606  	data, ok := tagEncoder.Data()
   607  	if !ok {
   608  		return samplesAppender{}, fmt.Errorf("unable to encode tags: names=%v, values=%v", tags.names, tags.values)
   609  	}
   610  	return samplesAppender{
   611  		agg:             a.agg,
   612  		clientRemote:    a.clientRemote,
   613  		unownedID:       data.Bytes(),
   614  		stagedMetadatas: []metadata.StagedMetadata{sm},
   615  
   616  		processedCountNonRollup: a.metrics.processedCountNonRollup,
   617  		processedCountRollup:    a.metrics.processedCountRollup,
   618  		operationsCount:         a.metrics.operationsCount,
   619  	}, nil
   620  }
   621  
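        // processTags builds the tag set for an augmented pipeline: it prepends any
        // graphite prefix tags (shifting existing graphite tag indices), copies the
        // original tags, and appends the pipeline tags, expanding the special
        // m3 graphite aggregation and Prometheus summary tags.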
   622  func (a *metricsAppender) processTags(
   623  	originalTags *tags,
   624  	graphitePrefix [][]byte,
   625  	t []models.Tag,
   626  	id aggregation.ID,
   627  ) *tags {
   628  	// Create the prefix tags if any.
   629  	tags := a.tags()
   630  	for i, path := range graphitePrefix {
   631  		// Add the graphite prefix as the initial graphite tags.
   632  		tags.append(graphite.TagName(i), path)
   633  	}
   634  
   635  	// Make a copy of the tags to augment.
   636  	prefixes := len(graphitePrefix)
   637  	for i := range originalTags.names {
   638  		// If we applied prefixes then we need to parse and modify the original
   639  		// tags. Check if the original tag is a graphite tag; if so, add the
   640  		// number of prefixes to the tag index and update it.
   641  		var (
   642  			name  = originalTags.names[i]
   643  			value = originalTags.values[i]
   644  		)
   645  		if prefixes > 0 {
   646  			// If the tag seen is a graphite tag then offset it based on number
   647  			// of prefixes we have seen.
   648  			if index, ok := graphite.TagIndex(name); ok {
   649  				name = graphite.TagName(index + prefixes)
   650  			}
   651  		}
   652  		tags.append(name, value)
   653  	}
   654  
   655  	// Add any additional tags we need to.
   656  	for _, tag := range t {
   657  		// If the tag is not a special tag, then just add it.
   658  		if !bytes.HasPrefix(tag.Name, metric.M3MetricsPrefix) {
   659  			if len(tag.Name) > 0 && len(tag.Value) > 0 {
   660  				tags.append(tag.Name, tag.Value)
   661  			}
   662  			continue
   663  		}
   664  
   665  		// Handle m3 special tags.
   666  		if bytes.Equal(tag.Name, metric.M3MetricsGraphiteAggregation) {
   667  			// Add the aggregation tag as the last graphite tag.
   668  			types, err := id.Types()
   669  			if err != nil || len(types) == 0 {
   670  				continue
   671  			}
   672  			var (
   673  				count = tags.countPrefix(graphite.Prefix)
   674  				name  = graphite.TagName(count)
   675  				value = types[0].Name()
   676  			)
   677  			tags.append(name, value)
   678  		}
   679  		if bytes.Equal(tag.Name, metric.M3MetricsPromSummary) {
   680  			types, err := id.Types()
   681  			if err != nil || len(types) == 0 {
   682  				continue
   683  			}
   684  			value, ok := types[0].QuantileBytes()
   685  			if !ok {
   686  				continue
   687  			}
   688  			tags.append(metric.PromQuantileName, value)
   689  		}
   690  	}
   691  	return tags
   692  }
   693  
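        // stagedMetadatasLogField renders staged metadatas as a structured zap
        // field for debug logging, falling back to an error string.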
   694  func stagedMetadatasLogField(sm metadata.StagedMetadatas) zapcore.Field {
   695  	json, err := stagedMetadatasJSON(sm)
   696  	if err != nil {
   697  		return zap.String("stagedMetadatasDebugErr", err.Error())
   698  	}
   699  	return zap.Any("stagedMetadatas", json)
   700  }
   701  
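        // stagedMetadatasJSON converts staged metadatas to a generic JSON object by
        // round-tripping through their protobuf representation.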
   702  func stagedMetadatasJSON(sm metadata.StagedMetadatas) (interface{}, error) {
   703  	var pb metricpb.StagedMetadatas
   704  	if err := sm.ToProto(&pb); err != nil {
   705  		return nil, err
   706  	}
   707  	var buff bytes.Buffer
   708  	if err := (&jsonpb.Marshaler{}).Marshal(&buff, &pb); err != nil {
   709  		return nil, err
   710  	}
   711  	var result map[string]interface{}
   712  	if err := json.Unmarshal(buff.Bytes(), &result); err != nil {
   713  		return nil, err
   714  	}
   715  	return result, nil
   716  }