github.com/m3db/m3@v1.5.0/src/cmd/services/m3coordinator/downsample/flush_handler.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package downsample

import (
	"bytes"
	"context"
	"sync"

	"github.com/m3db/m3/src/aggregator/aggregator/handler"
	"github.com/m3db/m3/src/aggregator/aggregator/handler/writer"
	"github.com/m3db/m3/src/metrics/metric/aggregated"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
	"github.com/m3db/m3/src/query/ts"
	"github.com/m3db/m3/src/x/convert"
	"github.com/m3db/m3/src/x/instrument"
	"github.com/m3db/m3/src/x/serialize"
	xsync "github.com/m3db/m3/src/x/sync"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/uber-go/tally"
	"go.uber.org/zap"
)

var (
	// MetricsOptionIDSchemeTagName is a meta tag
	// that indicates the metric ID should use a specific ID scheme.
	MetricsOptionIDSchemeTagName = []byte("__option_id_scheme__")
	// GraphiteIDSchemeTagValue specifies that the graphite ID
	// scheme should be used for a metric.
	GraphiteIDSchemeTagValue = []byte("graphite")
)

var (
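	// aggregationSuffixTag is the tag name under which a flushed metric's
	// aggregation suffix (its ChunkedID suffix) is stored.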
	aggregationSuffixTag = []byte("agg")
)

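// downsamplerFlushHandler implements handler.Handler and writes flushed
// aggregated metrics to the configured storage appender.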
type downsamplerFlushHandler struct {
	sync.RWMutex
	storage                storage.Appender
	metricTagsIteratorPool serialize.MetricTagsIteratorPool
	workerPool             xsync.WorkerPool
	instrumentOpts         instrument.Options
	metrics                downsamplerFlushHandlerMetrics
	tagOptions             models.TagOptions
}

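// downsamplerFlushHandlerMetrics holds counters for successful and failed flushes.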
type downsamplerFlushHandlerMetrics struct {
	flushSuccess tally.Counter
	flushErrors  tally.Counter
}

func newDownsamplerFlushHandlerMetrics(
	scope tally.Scope,
) downsamplerFlushHandlerMetrics {
	return downsamplerFlushHandlerMetrics{
		flushSuccess: scope.Counter("flush-success"),
		flushErrors:  scope.Counter("flush-errors"),
	}
}

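// newDownsamplerFlushHandler returns a handler.Handler that writes flushed
// aggregated metrics to the given storage appender using the provided worker pool.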
func newDownsamplerFlushHandler(
	storage storage.Appender,
	metricTagsIteratorPool serialize.MetricTagsIteratorPool,
	workerPool xsync.WorkerPool,
	tagOptions models.TagOptions,
	instrumentOpts instrument.Options,
) handler.Handler {
	scope := instrumentOpts.MetricsScope().SubScope("downsampler-flush-handler")
	return &downsamplerFlushHandler{
		storage:                storage,
		metricTagsIteratorPool: metricTagsIteratorPool,
		workerPool:             workerPool,
		instrumentOpts:         instrumentOpts,
		metrics:                newDownsamplerFlushHandlerMetrics(scope),
		tagOptions:             tagOptions,
	}
}

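// NewWriter returns a writer that writes flushed metrics to the handler's
// storage asynchronously via the handler's worker pool.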
func (h *downsamplerFlushHandler) NewWriter(
	scope tally.Scope,
) (writer.Writer, error) {
	return &downsamplerFlushHandlerWriter{
		tagOptions: h.tagOptions,
		ctx:        context.Background(),
		handler:    h,
	}, nil
}

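// Close is a no-op; the handler does not hold resources that need releasing.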
func (h *downsamplerFlushHandler) Close() {
}

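// downsamplerFlushHandlerWriter writes flushed metrics to storage on the
// parent handler's worker pool, tracking in-flight writes with a WaitGroup.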
type downsamplerFlushHandlerWriter struct {
	tagOptions models.TagOptions
	wg         sync.WaitGroup
	ctx        context.Context
	handler    *downsamplerFlushHandler
}

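// Write asynchronously decodes the chunked metric's ID into tags and writes a
// single datapoint to storage; failures are logged and counted rather than
// returned to the caller.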
func (w *downsamplerFlushHandlerWriter) Write(
	mp aggregated.ChunkedMetricWithStoragePolicy,
) error {
	w.wg.Add(1)
	w.handler.workerPool.Go(func() {
		defer w.wg.Done()

		logger := w.handler.instrumentOpts.Logger()

		iter := w.handler.metricTagsIteratorPool.Get()
		iter.Reset(mp.ChunkedID.Data)

		expected := iter.NumTags()
		chunkSuffix := mp.ChunkedID.Suffix
		if len(chunkSuffix) != 0 {
			expected++
		}

		tags := models.NewTags(expected, w.tagOptions)
		for iter.Next() {
			name, value := iter.Current()

			// NB(r): Quite gross; we need to actually make it possible to plumb this
			// through for each metric.
			// TODO_FIX_GRAPHITE_TAGGING: Using this string constant to track
			// all places worth fixing this hack. There is at least one other
			// path where data flows back to the coordinator from the aggregator
			// and this tag is interpreted; eventually this needs to be handled
			// more cleanly.
			if bytes.Equal(name, MetricsOptionIDSchemeTagName) {
				if bytes.Equal(value, GraphiteIDSchemeTagValue) &&
					tags.Opts.IDSchemeType() != models.TypeGraphite {
					iter.Reset(mp.ChunkedID.Data)
					tags.Opts = w.tagOptions.SetIDSchemeType(models.TypeGraphite)
					tags.Tags = tags.Tags[:0]
				}
				// Continue, whether we just updated the options and need to restart
				// iteration, or whether we are passing over this tag a second time.
				continue
			}

			tags = tags.AddTag(models.Tag{Name: name, Value: value}.Clone())
		}

		if len(chunkSuffix) != 0 {
			tags = tags.AddTag(models.Tag{Name: aggregationSuffixTag, Value: chunkSuffix}.Clone())
		}

		err := iter.Err()
		iter.Close()
		if err != nil {
			logger.Error("downsampler flush error preparing write", zap.Error(err))
			w.handler.metrics.flushErrors.Inc(1)
			return
		}

		writeQuery, err := storage.NewWriteQuery(storage.WriteQueryOptions{
			Tags: tags,
			Datapoints: ts.Datapoints{ts.Datapoint{
				Timestamp: xtime.UnixNano(mp.TimeNanos),
				Value:     mp.Value,
			}},
			Unit:       convert.UnitForM3DB(mp.StoragePolicy.Resolution().Precision),
			Annotation: mp.Annotation,
			Attributes: storagemetadata.Attributes{
				MetricsType: storagemetadata.AggregatedMetricsType,
				Retention:   mp.StoragePolicy.Retention().Duration(),
				Resolution:  mp.StoragePolicy.Resolution().Window,
			},
		})
		if err != nil {
			logger.Error("downsampler flush error creating write query", zap.Error(err))
			w.handler.metrics.flushErrors.Inc(1)
			return
		}

		if err := w.handler.storage.Write(w.ctx, writeQuery); err != nil {
			logger.Error("downsampler flush error failed write", zap.Error(err))
			w.handler.metrics.flushErrors.Inc(1)
			return
		}

		w.handler.metrics.flushSuccess.Inc(1)
	})

	return nil
}

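// Flush waits for all writes issued by Write to complete.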
func (w *downsamplerFlushHandlerWriter) Flush() error {
	// NB(r): This simply waits for in-flight requests to complete,
	// since this flush handler isn't connection based.
	w.wg.Wait()
	return nil
}

func (w *downsamplerFlushHandlerWriter) Close() error {
	// NB(r): This is a no-op since this flush handler isn't connection based.
	return nil
}