github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/cmd/services/m3coordinator/downsample/downsampler.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package downsample

import (
	"sync"

	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
	"github.com/m3db/m3/src/metrics/rules"
	"github.com/m3db/m3/src/metrics/rules/view"
	"github.com/m3db/m3/src/query/storage/m3"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
	"github.com/m3db/m3/src/query/ts"
	xtime "github.com/m3db/m3/src/x/time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Downsampler performs downsampling and aggregation of metrics before they
// are written to aggregated cluster namespaces.
type Downsampler interface {
	// Fetcher exposes LatestRollupRules, which returns a snapshot of the
	// latest rollup rules for a given namespace at a given time.
	rules.Fetcher
	// NewMetricsAppender returns a metrics appender for writing samples
	// to the downsampler.
	NewMetricsAppender() (MetricsAppender, error)
	// Enabled indicates whether the downsampler is enabled or not. A
	// downsampler is enabled only if aggregated ClusterNamespaces exist,
	// since downsampling applies only to aggregations.
	Enabled() bool
}

// MetricsAppender is a metrics appender that can build a samples
// appender; it is only valid for use by a single caller at a time.
type MetricsAppender interface {
	// NextMetric progresses to building the next metric.
	NextMetric()
	// AddTag adds a tag to the current metric being built.
	AddTag(name, value []byte)
	// SamplesAppender returns a samples appender for the current
	// metric built with the tags that have been set.
	SamplesAppender(opts SampleAppenderOptions) (SamplesAppenderResult, error)
	// Finalize finalizes the entire metrics appender for reuse.
	Finalize()
}

// SamplesAppenderResult is the result from a MetricsAppender.SamplesAppender call.
type SamplesAppenderResult struct {
	SamplesAppender     SamplesAppender
	IsDropPolicyApplied bool
	ShouldDropTimestamp bool
}

// SampleAppenderOptions defines the options used when constructing
// the samples appender for a metric.
type SampleAppenderOptions struct {
	Override         bool
	OverrideRules    SamplesAppenderOverrideRules
	SeriesAttributes ts.SeriesAttributes
}

// SamplesAppenderOverrideRules provides override rules to use instead of
// the default and dynamically matched rules for an ID.
type SamplesAppenderOverrideRules struct {
	MappingRules []AutoMappingRule
}

// SamplesAppender is a downsampling samples appender
// that can only be used by a single caller at a time.
// The client timestamp provided to the Untimed methods is only used to monitor
// ingestion latency on the server; it is dropped and a server-side timestamp is
// used for the metric.
type SamplesAppender interface {
	AppendUntimedCounterSample(t xtime.UnixNano, value int64, annotation []byte) error
	AppendUntimedGaugeSample(t xtime.UnixNano, value float64, annotation []byte) error
	AppendUntimedTimerSample(t xtime.UnixNano, value float64, annotation []byte) error
	AppendCounterSample(t xtime.UnixNano, value int64, annotation []byte) error
	AppendGaugeSample(t xtime.UnixNano, value float64, annotation []byte) error
	AppendTimerSample(t xtime.UnixNano, value float64, annotation []byte) error
}

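// The function below is a minimal usage sketch, not part of the original file,
// showing how the interfaces above compose: check that downsampling is enabled,
// build a metric with tags, obtain a samples appender, append a counter sample,
// and finalize the appender for reuse. The function name, metric name, tag
// values and counter value are illustrative assumptions.
func exampleAppendCounterSketch(d Downsampler, at xtime.UnixNano) error {
	// Downsampling only applies when aggregated namespaces exist.
	if !d.Enabled() {
		return nil
	}

	appender, err := d.NewMetricsAppender()
	if err != nil {
		return err
	}
	// Finalize returns the appender for reuse once appending is done.
	defer appender.Finalize()

	// Build the identity of the metric being appended.
	appender.NextMetric()
	appender.AddTag([]byte("__name__"), []byte("http_requests_total")) // assumed metric name
	appender.AddTag([]byte("status"), []byte("200"))                   // assumed tag

	// Zero-value options: no override rules, default series attributes.
	result, err := appender.SamplesAppender(SampleAppenderOptions{})
	if err != nil {
		return err
	}
	// IsDropPolicyApplied reports that a drop policy matched this metric; the
	// ingest path can use it to decide whether other writes should be skipped.
	_ = result.IsDropPolicyApplied

	if result.ShouldDropTimestamp {
		// Untimed appends discard the client timestamp; the server assigns one.
		return result.SamplesAppender.AppendUntimedCounterSample(at, 1, nil)
	}
	return result.SamplesAppender.AppendCounterSample(at, 1, nil)
}
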
type downsampler struct {
	opts DownsamplerOptions
	agg  agg

	sync.RWMutex
	metricsAppenderOpts metricsAppenderOptions
	enabled             bool
}

type downsamplerOptions struct {
	opts DownsamplerOptions
	agg  agg
}

func newDownsampler(opts downsamplerOptions) (*downsampler, error) {
	if err := opts.opts.validate(); err != nil {
		return nil, err
	}

	downsampler := &downsampler{
		opts:                opts.opts,
		agg:                 opts.agg,
		metricsAppenderOpts: defaultMetricsAppenderOptions(opts.opts, opts.agg),
	}

	// No need to retain the watch: ClusterNamespacesWatcher.Close() will close
	// any watches created by registering listeners.
	downsampler.opts.ClusterNamespacesWatcher.RegisterListener(downsampler)

	return downsampler, nil
}

func defaultMetricsAppenderOptions(opts DownsamplerOptions, agg agg) metricsAppenderOptions {
	// Enable debug logging in the appender only if the logger has the debug
	// level enabled.
	debugLogging := false
	logger := opts.InstrumentOptions.Logger()
	if logger.Check(zapcore.DebugLevel, "debug") != nil {
		debugLogging = true
	}
	scope := opts.InstrumentOptions.MetricsScope().SubScope("metrics_appender")
	metrics := metricsAppenderMetrics{
		processedCountNonRollup: scope.Tagged(map[string]string{"agg_type": "non_rollup"}).Counter("processed"),
		processedCountRollup:    scope.Tagged(map[string]string{"agg_type": "rollup"}).Counter("processed"),
		operationsCount:         scope.Counter("operations_processed"),
	}

	return metricsAppenderOptions{
		agg:            agg.aggregator,
		clientRemote:   agg.clientRemote,
		clockOpts:      agg.clockOpts,
		tagEncoderPool: agg.pools.tagEncoderPool,
		matcher:        agg.matcher,
		debugLogging:   debugLogging,
		logger:         logger,
		untimedRollups: agg.untimedRollups,
		metrics:        metrics,
	}
}

func (d *downsampler) LatestRollupRules(namespace []byte, timeNanos int64) ([]view.RollupRule, error) {
	d.RLock()
	defer d.RUnlock()
	return d.agg.matcher.LatestRollupRules(namespace, timeNanos)
}

func (d *downsampler) NewMetricsAppender() (MetricsAppender, error) {
	metricsAppender := d.agg.pools.metricsAppenderPool.Get()

	d.RLock()
	newMetricsAppenderOpts := d.metricsAppenderOpts
	d.RUnlock()

	metricsAppender.reset(newMetricsAppenderOpts)

	return metricsAppender, nil
}

func (d *downsampler) Enabled() bool {
	d.RLock()
	defer d.RUnlock()

	return d.enabled
}

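// OnUpdate is the listener callback for the ClusterNamespacesWatcher, registered
// in newDownsampler. It rebuilds the default staged metadatas from automapping
// rules for the given namespaces and enables downsampling only when at least one
// aggregated namespace is present.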
func (d *downsampler) OnUpdate(namespaces m3.ClusterNamespaces) {
	logger := d.opts.InstrumentOptions.Logger()

	if len(namespaces) == 0 {
		logger.Debug("received empty list of namespaces. not updating staged metadata")
		return
	}

	var hasAggregatedNamespaces bool
	for _, namespace := range namespaces {
		attrs := namespace.Options().Attributes()
		if attrs.MetricsType == storagemetadata.AggregatedMetricsType {
			hasAggregatedNamespaces = true
			break
		}
	}

	autoMappingRules, err := NewAutoMappingRules(namespaces)
	if err != nil {
		logger.Error("could not generate automapping rules for aggregated namespaces."+
			" aggregations will continue with current configuration.", zap.Error(err))
		return
	}
	defaultStagedMetadatasProtos := make([]metricpb.StagedMetadatas, 0, len(autoMappingRules))
	for _, rule := range autoMappingRules {
		metadatas, err := rule.StagedMetadatas()
		if err != nil {
			logger.Error("could not generate staged metadata from automapping rules."+
				" aggregations will continue with current configuration.", zap.Error(err))
			return
		}

		var metadatasProto metricpb.StagedMetadatas
		if err := metadatas.ToProto(&metadatasProto); err != nil {
			logger.Error("could not generate staged metadata from automapping rules."+
				" aggregations will continue with current configuration.", zap.Error(err))
			return
		}

		defaultStagedMetadatasProtos = append(defaultStagedMetadatasProtos, metadatasProto)
	}

	d.Lock()
	d.metricsAppenderOpts.defaultStagedMetadatasProtos = defaultStagedMetadatasProtos
	// Can only downsample when aggregated namespaces are available.
	d.enabled = hasAggregatedNamespaces
	d.Unlock()
}