github.com/m3db/m3@v1.5.0/src/cmd/services/m3coordinator/ingest/write.go

     1  // Copyright (c) 2019 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package ingest
    22  
    23  import (
    24  	"context"
    25  	"sync"
    26  
    27  	"github.com/m3db/m3/src/cmd/services/m3coordinator/downsample"
    28  	"github.com/m3db/m3/src/metrics/policy"
    29  	"github.com/m3db/m3/src/query/models"
    30  	"github.com/m3db/m3/src/query/storage"
    31  	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
    32  	"github.com/m3db/m3/src/query/ts"
    33  	xerrors "github.com/m3db/m3/src/x/errors"
    34  	"github.com/m3db/m3/src/x/instrument"
    35  	xsync "github.com/m3db/m3/src/x/sync"
    36  	xtime "github.com/m3db/m3/src/x/time"
    37  
    38  	"github.com/uber-go/tally"
    39  )
    40  
    41  var (
    42  	unaggregatedStoragePolicy   = policy.NewStoragePolicy(0, xtime.Unit(0), 0)
    43  	unaggregatedStoragePolicies = []policy.StoragePolicy{
    44  		unaggregatedStoragePolicy,
    45  	}
    46  
    47  	sourceTags = map[ts.SourceType]string{
    48  		ts.SourceTypePrometheus:  "prometheus",
    49  		ts.SourceTypeGraphite:    "graphite",
    50  		ts.SourceTypeOpenMetrics: "open-metrics",
    51  	}
    52  )
    53  
    54  // IterValue is the value returned by the iterator.
    55  type IterValue struct {
    56  	Tags       models.Tags
    57  	Datapoints ts.Datapoints
    58  	Attributes ts.SeriesAttributes
    59  	Unit       xtime.Unit
    60  	Metadata   ts.Metadata
    61  	Annotation []byte
    62  }
    63  
    64  // DownsampleAndWriteIter is an interface that can be implemented to use
    65  // the WriteBatch method.
    66  type DownsampleAndWriteIter interface {
    67  	Next() bool
    68  	Current() IterValue
    69  	Reset() error
    70  	Error() error
    71  	SetCurrentMetadata(ts.Metadata)
    72  }
    73  
    74  // DownsamplerAndWriter is the interface for the downsamplerAndWriter which
    75  // writes metrics to the downsampler as well as to storage in unaggregated form.
    76  type DownsamplerAndWriter interface {
    77  	Write(
    78  		ctx context.Context,
    79  		tags models.Tags,
    80  		datapoints ts.Datapoints,
    81  		unit xtime.Unit,
    82  		annotation []byte,
    83  		overrides WriteOptions,
    84  		source ts.SourceType,
    85  	) error
    86  
    87  	WriteBatch(
    88  		ctx context.Context,
    89  		iter DownsampleAndWriteIter,
    90  		overrides WriteOptions,
    91  	) BatchError
    92  
    93  	Storage() storage.Storage
    94  
    95  	Downsampler() downsample.Downsampler
    96  }
    97  
    98  // BatchError allows for access to individual errors.
    99  type BatchError interface {
   100  	error
   101  	Errors() []error
   102  	LastError() error
   103  }
   104  
   105  // WriteOptions contains overrides for the downsampling mapping
   106  // rules and storage policies for a given write.
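        //
        // For example (illustrative values only), a caller can skip downsampling
        // entirely and write directly to a single aggregated namespace by passing:
        //
        //	WriteOptions{
        //		// An override with no mapping rules disables downsampling for this write.
        //		DownsampleOverride: true,
        //		WriteOverride:      true,
        //		WriteStoragePolicies: []policy.StoragePolicy{
        //			policy.NewStoragePolicy(time.Minute, xtime.Minute, 40*24*time.Hour),
        //		},
        //	}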
   107  type WriteOptions struct {
   108  	DownsampleMappingRules []downsample.AutoMappingRule
   109  	WriteStoragePolicies   []policy.StoragePolicy
   110  
   111  	DownsampleOverride bool
   112  	WriteOverride      bool
   113  }
   114  
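        // downsamplerAndWriterMetrics holds the written and dropped counters for the
        // downsamplerAndWriter, partitioned by metric source.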
   115  type downsamplerAndWriterMetrics struct {
   116  	dropped metricsBySource
   117  	written metricsBySource
   118  }
   119  
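        // metricsBySource holds one counter per known metric source plus a fallback
        // counter for unknown sources.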
   120  type metricsBySource struct {
   121  	bySource  map[ts.SourceType]tally.Counter
   122  	byUnknown tally.Counter
   123  }
   124  
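        // report increments the counter for the given source, falling back to the
        // unknown-source counter if the source is not recognized.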
   125  func (m metricsBySource) report(source ts.SourceType) {
   126  	counter, ok := m.bySource[source]
   127  	if !ok {
   128  		counter = m.byUnknown
   129  	}
   130  	counter.Inc(1)
   131  }
   132  
   133  // downsamplerAndWriter encapsulates the logic for writing data to the downsampler,
   134  // as well as in unaggregated form to storage.
   135  type downsamplerAndWriter struct {
   136  	store       storage.Storage
   137  	downsampler downsample.Downsampler
   138  	workerPool  xsync.PooledWorkerPool
   139  
   140  	metrics downsamplerAndWriterMetrics
   141  }
   142  
   143  // NewDownsamplerAndWriter creates a new downsampler and writer.
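        //
        // A minimal usage sketch (illustrative only; store, downsampler, workerPool
        // and instrumentOpts are assumed to have been constructed elsewhere, e.g.
        // from coordinator configuration):
        //
        //	dw := NewDownsamplerAndWriter(store, downsampler, workerPool, instrumentOpts)
        //	err := dw.Write(ctx, tags, datapoints, xtime.Millisecond, nil, WriteOptions{}, ts.SourceTypePrometheus)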
   144  func NewDownsamplerAndWriter(
   145  	store storage.Storage,
   146  	downsampler downsample.Downsampler,
   147  	workerPool xsync.PooledWorkerPool,
   148  	instrumentOpts instrument.Options,
   149  ) DownsamplerAndWriter {
   150  	scope := instrumentOpts.MetricsScope().SubScope("downsampler")
   151  
   152  	return &downsamplerAndWriter{
   153  		store:       store,
   154  		downsampler: downsampler,
   155  		workerPool:  workerPool,
   156  		metrics: downsamplerAndWriterMetrics{
   157  			dropped: newMetricsBySource(scope, "metrics_dropped"),
   158  			written: newMetricsBySource(scope, "metrics_written"),
   159  		},
   160  	}
   161  }
   162  
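        // newMetricsBySource builds a counter for each known metric source, tagged by
        // source name, plus a fallback counter tagged as unknown.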
   163  func newMetricsBySource(scope tally.Scope, name string) metricsBySource {
   164  	metrics := metricsBySource{
   165  		bySource:  make(map[ts.SourceType]tally.Counter, len(sourceTags)),
   166  		byUnknown: scope.Tagged(map[string]string{"source": "unknown"}).Counter(name),
   167  	}
   168  
   169  	for source, tag := range sourceTags {
   170  		metrics.bySource[source] = scope.Tagged(map[string]string{"source": tag}).Counter(name)
   171  	}
   172  
   173  	return metrics
   174  }
   175  
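        // Write writes the datapoints to the downsampler and, unless a downsampling
        // drop policy was applied, also to storage.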
   176  func (d *downsamplerAndWriter) Write(
   177  	ctx context.Context,
   178  	tags models.Tags,
   179  	datapoints ts.Datapoints,
   180  	unit xtime.Unit,
   181  	annotation []byte,
   182  	overrides WriteOptions,
   183  	source ts.SourceType,
   184  ) error {
   185  	var (
   186  		multiErr         = xerrors.NewMultiError()
   187  		dropUnaggregated bool
   188  	)
   189  
   190  	if d.shouldDownsample(overrides) {
   191  		var err error
   192  		dropUnaggregated, err = d.writeToDownsampler(tags, datapoints, annotation, overrides)
   193  		if err != nil {
   194  			multiErr = multiErr.Add(err)
   195  		}
   196  	}
   197  
   198  	if dropUnaggregated {
   199  		d.metrics.dropped.report(source)
   200  	} else if d.shouldWrite(overrides) {
   201  		err := d.writeToStorage(ctx, tags, datapoints, unit, annotation, overrides, source)
   202  		if err != nil {
   203  			multiErr = multiErr.Add(err)
   204  		}
   205  	}
   206  
   207  	return multiErr.FinalError()
   208  }
   209  
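        // shouldWrite reports whether the write should be sent directly to storage.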
   210  func (d *downsamplerAndWriter) shouldWrite(
   211  	overrides WriteOptions,
   212  ) bool {
   213  	var (
   214  		// Ensure storage set.
   215  		storageExists = d.store != nil
   216  		// Use the default storage policies unless the caller requested an override.
   217  		useDefaultStoragePolicies = !overrides.WriteOverride
   218  		// If the caller tried to override the storage policies, make sure there's
   219  		// at least one.
   220  		_, writeOverride = d.writeOverrideStoragePolicies(overrides)
   221  	)
   222  	// Only write directly to storage if the store exists, and the caller wants to
   223  	// use the default storage policies, or they're trying to override the
   224  	// storage policies and they've provided at least one override to do so.
   225  	return storageExists && (useDefaultStoragePolicies || writeOverride)
   226  }
   227  
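        // writeOverrideStoragePolicies returns the caller-provided storage policy
        // overrides and whether at least one override was supplied.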
   228  func (d *downsamplerAndWriter) writeOverrideStoragePolicies(
   229  	overrides WriteOptions,
   230  ) ([]policy.StoragePolicy, bool) {
   231  	writeOverride := overrides.WriteOverride && len(overrides.WriteStoragePolicies) > 0
   232  	if !writeOverride {
   233  		return nil, false
   234  	}
   235  	return overrides.WriteStoragePolicies, true
   236  }
   237  
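        // shouldDownsample reports whether the write should be sent to the downsampler.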
   238  func (d *downsamplerAndWriter) shouldDownsample(
   239  	overrides WriteOptions,
   240  ) bool {
   241  	var (
   242  		// If they didn't request the mapping rules to be overridden, then assume they want the default
   243  		// ones.
   244  		useDefaultMappingRules = !overrides.DownsampleOverride
   245  		// If they did try and override the mapping rules, make sure they've provided at least one.
   246  		_, downsampleOverride = d.downsampleOverrideRules(overrides)
   247  	)
   248  	// Only downsample if the downsampler is enabled, and they either want to use the default mapping
   249  	// rules, or they're trying to override the mapping rules and they've provided at least one
   250  	// override to do so.
   251  	return d.downsampler.Enabled() && (useDefaultMappingRules || downsampleOverride)
   252  }
   253  
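        // downsampleOverrideRules returns the caller-provided mapping rule overrides
        // and whether at least one override was supplied.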
   254  func (d *downsamplerAndWriter) downsampleOverrideRules(
   255  	overrides WriteOptions,
   256  ) ([]downsample.AutoMappingRule, bool) {
   257  	downsampleOverride := overrides.DownsampleOverride && len(overrides.DownsampleMappingRules) > 0
   258  	if !downsampleOverride {
   259  		return nil, false
   260  	}
   261  	return overrides.DownsampleMappingRules, true
   262  }
   263  
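        // writeToDownsampler appends the datapoints to the downsampler as gauge
        // samples and reports whether a drop policy was applied, in which case the
        // unaggregated write should be skipped.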
   264  func (d *downsamplerAndWriter) writeToDownsampler(
   265  	tags models.Tags,
   266  	datapoints ts.Datapoints,
   267  	annotation []byte,
   268  	overrides WriteOptions,
   269  ) (bool, error) {
   270  	if err := tags.Validate(); err != nil {
   271  		return false, err
   272  	}
   273  
   274  	appender, err := d.downsampler.NewMetricsAppender()
   275  	if err != nil {
   276  		return false, err
   277  	}
   278  
   279  	defer appender.Finalize()
   280  
   281  	for _, tag := range tags.Tags {
   282  		appender.AddTag(tag.Name, tag.Value)
   283  	}
   284  
   285  	if tags.Opts.IDSchemeType() == models.TypeGraphite {
   286  		// NB(r): This is gross, but if this is a graphite metric then
   287  		// we are going to set a special tag that means the downsampler
   288  		// will write a graphite ID. This should really be plumbed
   289  		// through the downsampler in general, but right now the aggregator
   290  		// does not allow context to be attached to a metric so when it calls
   291  		// back the context is lost currently.
   292  		// TODO_FIX_GRAPHITE_TAGGING: Using this string constant to track
   293  		// all places worth fixing this hack. There is at least one
   294  		// other path where data flows back to the coordinator from the aggregator
   295  		// and this tag is interpreted; this eventually needs to be handled more cleanly.
   296  		appender.AddTag(downsample.MetricsOptionIDSchemeTagName,
   297  			downsample.GraphiteIDSchemeTagValue)
   298  	}
   299  
   300  	// NB: we don't set series attributes on the sample appender options here.
   301  	// In practice this isn't needed because only the carbon ingest path comes through here.
   302  	var appenderOpts downsample.SampleAppenderOptions
   303  	if downsampleMappingRuleOverrides, ok := d.downsampleOverrideRules(overrides); ok {
   304  		appenderOpts = downsample.SampleAppenderOptions{
   305  			Override: true,
   306  			OverrideRules: downsample.SamplesAppenderOverrideRules{
   307  				MappingRules: downsampleMappingRuleOverrides,
   308  			},
   309  		}
   310  	}
   311  
   312  	result, err := appender.SamplesAppender(appenderOpts)
   313  	if err != nil {
   314  		return false, err
   315  	}
   316  
   317  	for _, dp := range datapoints {
   318  		if result.ShouldDropTimestamp {
   319  			err = result.SamplesAppender.AppendUntimedGaugeSample(dp.Timestamp, dp.Value, annotation)
   320  		} else {
   321  			err = result.SamplesAppender.AppendGaugeSample(
   322  				dp.Timestamp, dp.Value, annotation,
   323  			)
   324  		}
   325  		if err != nil {
   326  			return result.IsDropPolicyApplied, err
   327  		}
   328  	}
   329  
   330  	return result.IsDropPolicyApplied, nil
   331  }
   332  
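        // writeToStorage writes the datapoints to storage, either as a single
        // unaggregated write or as one write per overridden storage policy fanned
        // out on the worker pool.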
   333  func (d *downsamplerAndWriter) writeToStorage(
   334  	ctx context.Context,
   335  	tags models.Tags,
   336  	datapoints ts.Datapoints,
   337  	unit xtime.Unit,
   338  	annotation []byte,
   339  	overrides WriteOptions,
   340  	source ts.SourceType,
   341  ) error {
   342  	d.metrics.written.report(source)
   343  
   344  	storagePolicies, ok := d.writeOverrideStoragePolicies(overrides)
   345  	if !ok {
   346  		// NB(r): Allocate the write query at the top
   347  		// of the pooled worker instead of needing to pass
   348  		// the options down the stack, which can cause
   349  		// the stack to grow (and sometimes cause stack splits).
   350  		writeQuery, err := storage.NewWriteQuery(storage.WriteQueryOptions{
   351  			Tags:       tags,
   352  			Datapoints: datapoints,
   353  			Unit:       unit,
   354  			Annotation: annotation,
   355  			Attributes: storageAttributesFromPolicy(unaggregatedStoragePolicy),
   356  		})
   357  		if err != nil {
   358  			return err
   359  		}
   360  		return d.store.Write(ctx, writeQuery)
   361  	}
   362  
   363  	var (
   364  		wg       sync.WaitGroup
   365  		multiErr xerrors.MultiError
   366  		errLock  sync.Mutex
   367  	)
   368  
   369  	for _, p := range storagePolicies {
   370  		p := p // Capture for goroutine.
   371  
   372  		wg.Add(1)
   373  		d.workerPool.Go(func() {
   374  			// NB(r): Allocate the write query at the top
   375  			// of the pooled worker instead of needing to pass
   376  			// the options down the stack, which can cause
   377  			// the stack to grow (and sometimes cause stack splits).
   378  			writeQuery, err := storage.NewWriteQuery(storage.WriteQueryOptions{
   379  				Tags:       tags,
   380  				Datapoints: datapoints,
   381  				Unit:       unit,
   382  				Annotation: annotation,
   383  				Attributes: storageAttributesFromPolicy(p),
   384  			})
   385  			if err == nil {
   386  				err = d.store.Write(ctx, writeQuery)
   387  			}
   388  			if err != nil {
   389  				errLock.Lock()
   390  				multiErr = multiErr.Add(err)
   391  				errLock.Unlock()
   392  			}
   393  
   394  			wg.Done()
   395  		})
   396  	}
   397  
   398  	wg.Wait()
   399  	return multiErr.FinalError()
   400  }
   401  
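        // WriteBatch writes the iterator's series to the downsampler, then resets the
        // iterator and writes each series to storage with either the unaggregated or
        // the overridden storage policies.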
   402  func (d *downsamplerAndWriter) WriteBatch(
   403  	ctx context.Context,
   404  	iter DownsampleAndWriteIter,
   405  	overrides WriteOptions,
   406  ) BatchError {
   407  	var (
   408  		wg       sync.WaitGroup
   409  		multiErr xerrors.MultiError
   410  		errLock  sync.Mutex
   411  		addError = func(err error) {
   412  			errLock.Lock()
   413  			multiErr = multiErr.Add(err)
   414  			errLock.Unlock()
   415  		}
   416  	)
   417  
   418  	if d.shouldDownsample(overrides) {
   419  		if errs := d.writeAggregatedBatch(iter, overrides); !errs.Empty() {
   420  			// Iterate through all the errors and add them to the multi error. It
   421  			// is ok not to use the addError method here as we are still running
   422  			// single-threaded at this point.
   423  			for _, err := range errs.Errors() {
   424  				multiErr = multiErr.Add(err)
   425  			}
   426  		}
   427  	}
   428  
   429  	// Reset the iter to write the unaggregated data.
   430  	resetErr := iter.Reset()
   431  	if resetErr != nil {
   432  		addError(resetErr)
   433  	}
   434  
   435  	if d.shouldWrite(overrides) && resetErr == nil {
   436  		// Write unaggregated. Spin up background goroutines that make the
   437  		// network requests for each storage policy; they are all waited on
   438  		// below before returning.
   439  		storagePolicies, ok := d.writeOverrideStoragePolicies(overrides)
   440  		if !ok {
   441  			storagePolicies = unaggregatedStoragePolicies
   442  		}
   443  
   444  		for iter.Next() {
   445  			value := iter.Current()
   446  			if value.Metadata.DropUnaggregated {
   447  				d.metrics.dropped.report(value.Attributes.Source)
   448  				continue
   449  			}
   450  
   451  			d.metrics.written.report(value.Attributes.Source)
   452  
   453  			for _, p := range storagePolicies {
   454  				p := p // Capture for lambda.
   455  				wg.Add(1)
   456  				d.workerPool.Go(func() {
   457  					// NB(r): Allocate the write query at the top
   458  					// of the pooled worker instead of needing to pass
   459  					// the options down the stack, which can cause
   460  					// the stack to grow (and sometimes cause stack splits).
   461  					writeQuery, err := storage.NewWriteQuery(storage.WriteQueryOptions{
   462  						Tags:       value.Tags,
   463  						Datapoints: value.Datapoints,
   464  						Unit:       value.Unit,
   465  						Annotation: value.Annotation,
   466  						Attributes: storageAttributesFromPolicy(p),
   467  					})
   468  					if err == nil {
   469  						err = d.store.Write(ctx, writeQuery)
   470  					}
   471  					if err != nil {
   472  						addError(err)
   473  					}
   474  					wg.Done()
   475  				})
   476  			}
   477  		}
   478  	}
   479  
   480  	wg.Wait()
   481  	if multiErr.NumErrors() == 0 {
   482  		return nil
   483  	}
   484  
   485  	return multiErr
   486  }
   487  
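        // writeAggregatedBatch drains the iterator into the downsampler, marking any
        // series whose drop policy applied so that the unaggregated pass can skip them.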
   488  func (d *downsamplerAndWriter) writeAggregatedBatch(
   489  	iter DownsampleAndWriteIter,
   490  	overrides WriteOptions,
   491  ) xerrors.MultiError {
   492  	var multiErr xerrors.MultiError
   493  	appender, err := d.downsampler.NewMetricsAppender()
   494  	if err != nil {
   495  		return multiErr.Add(err)
   496  	}
   497  
   498  	defer appender.Finalize()
   499  
   500  	for iter.Next() {
   501  		appender.NextMetric()
   502  
   503  		value := iter.Current()
   504  		if err := value.Tags.Validate(); err != nil {
   505  			multiErr = multiErr.Add(err)
   506  			continue
   507  		}
   508  
   509  		for _, tag := range value.Tags.Tags {
   510  			appender.AddTag(tag.Name, tag.Value)
   511  		}
   512  
   513  		if value.Tags.Opts.IDSchemeType() == models.TypeGraphite {
   514  			// NB(r): This is gross, but if this is a graphite metric then
   515  			// we are going to set a special tag that means the downsampler
   516  			// will write a graphite ID. This should really be plumbed
   517  			// through the downsampler in general, but right now the aggregator
   518  			// does not allow context to be attached to a metric so when it calls
   519  			// back the context is lost currently.
   520  			// TODO_FIX_GRAPHITE_TAGGING: Using this string constant to track
   521  			// all places worth fixing this hack. There is at least one
   522  			// other path where data flows back to the coordinator from the aggregator
   523  			// and this tag is interpreted; this eventually needs to be handled more cleanly.
   524  			appender.AddTag(downsample.MetricsOptionIDSchemeTagName,
   525  				downsample.GraphiteIDSchemeTagValue)
   526  		}
   527  
   528  		opts := downsample.SampleAppenderOptions{
   529  			SeriesAttributes: value.Attributes,
   530  		}
   531  		if downsampleMappingRuleOverrides, ok := d.downsampleOverrideRules(overrides); ok {
   532  			opts = downsample.SampleAppenderOptions{
   533  				Override: true,
   534  				OverrideRules: downsample.SamplesAppenderOverrideRules{
   535  					MappingRules: downsampleMappingRuleOverrides,
   536  				},
   537  			}
   538  		}
   539  
   540  		result, err := appender.SamplesAppender(opts)
   541  		if err != nil {
   542  			multiErr = multiErr.Add(err)
   543  			continue
   544  		}
   545  
   546  		if result.IsDropPolicyApplied {
   547  			iter.SetCurrentMetadata(ts.Metadata{DropUnaggregated: true})
   548  		}
   549  
   550  		for _, dp := range value.Datapoints {
   551  			switch value.Attributes.M3Type {
   552  			case ts.M3MetricTypeGauge:
   553  				if result.ShouldDropTimestamp {
   554  					err = result.SamplesAppender.AppendUntimedGaugeSample(dp.Timestamp, dp.Value, value.Annotation)
   555  				} else {
   556  					err = result.SamplesAppender.AppendGaugeSample(
   557  						dp.Timestamp, dp.Value, value.Annotation,
   558  					)
   559  				}
   560  			case ts.M3MetricTypeCounter:
   561  				if result.ShouldDropTimestamp {
   562  					err = result.SamplesAppender.AppendUntimedCounterSample(
   563  						dp.Timestamp, int64(dp.Value), value.Annotation)
   564  				} else {
   565  					err = result.SamplesAppender.AppendCounterSample(
   566  						dp.Timestamp, int64(dp.Value), value.Annotation,
   567  					)
   568  				}
   569  			case ts.M3MetricTypeTimer:
   570  				if result.ShouldDropTimestamp {
   571  					err = result.SamplesAppender.AppendUntimedTimerSample(dp.Timestamp, dp.Value, value.Annotation)
   572  				} else {
   573  					err = result.SamplesAppender.AppendTimerSample(
   574  						dp.Timestamp, dp.Value, value.Annotation,
   575  					)
   576  				}
   577  			}
   578  			if err != nil {
   579  				// If we see an error, record it and move on so we can try
   580  				// processing the next datapoint.
   581  				multiErr = multiErr.Add(err)
   582  			}
   583  		}
   584  	}
   585  
   586  	return multiErr.Add(iter.Error())
   587  }
   588  
   589  func (d *downsamplerAndWriter) Downsampler() downsample.Downsampler {
   590  	return d.downsampler
   591  }
   592  
   593  func (d *downsamplerAndWriter) Storage() storage.Storage {
   594  	return d.store
   595  }
   596  
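        // storageAttributesFromPolicy maps a storage policy to storage metadata
        // attributes, treating the sentinel unaggregated policy as the unaggregated
        // metrics type and any other policy as an aggregated namespace.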
   597  func storageAttributesFromPolicy(
   598  	p policy.StoragePolicy,
   599  ) storagemetadata.Attributes {
   600  	attributes := storagemetadata.Attributes{
   601  		MetricsType: storagemetadata.UnaggregatedMetricsType,
   602  	}
   603  	if p != unaggregatedStoragePolicy {
   604  		attributes = storagemetadata.Attributes{
   605  			// Assume all overridden storage policies are for aggregated namespaces.
   606  			MetricsType: storagemetadata.AggregatedMetricsType,
   607  			Resolution:  p.Resolution().Window,
   608  			Retention:   p.Retention().Duration(),
   609  		}
   610  	}
   611  	return attributes
   612  }