github.com/m3db/m3@v1.5.0/src/query/api/v1/handler/prometheus/remote/write.go (about)

     1  // Copyright (c) 2018 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package remote
    22  
import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
	"sync/atomic"
	"time"

	"github.com/m3db/m3/src/cmd/services/m3coordinator/ingest"
	"github.com/m3db/m3/src/dbnode/client"
	"github.com/m3db/m3/src/metrics/policy"
	"github.com/m3db/m3/src/query/api/v1/handler/prometheus"
	"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
	"github.com/m3db/m3/src/query/api/v1/options"
	"github.com/m3db/m3/src/query/api/v1/route"
	"github.com/m3db/m3/src/query/generated/proto/prompb"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
	"github.com/m3db/m3/src/query/ts"
	"github.com/m3db/m3/src/query/util/logging"
	"github.com/m3db/m3/src/x/clock"
	xerrors "github.com/m3db/m3/src/x/errors"
	"github.com/m3db/m3/src/x/headers"
	"github.com/m3db/m3/src/x/instrument"
	xhttp "github.com/m3db/m3/src/x/net/http"
	"github.com/m3db/m3/src/x/retry"
	xsync "github.com/m3db/m3/src/x/sync"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/golang/protobuf/proto"
	"github.com/uber-go/tally"
	"go.uber.org/zap"
)
    61  
const (
	// PromWriteURL is the url for the prom write handler
	PromWriteURL = route.Prefix + "/prom/remote/write"

	// PromWriteHTTPMethod is the HTTP method used with this resource.
	PromWriteHTTPMethod = http.MethodPost

	// emptyStoragePolicyVar for code readability (an absent storage policy header).
	emptyStoragePolicyVar = ""

	// defaultForwardingTimeout is the default forwarding timeout, used when
	// forwarding configuration does not specify one.
	defaultForwardingTimeout = 15 * time.Second

	// maxLiteralIsTooLongLogCount is the number of times the time series labels should be logged
	// upon "literal is too long" error.
	maxLiteralIsTooLongLogCount = 10
	// literalPrefixLength is the length of the label literal prefix that is logged upon
	// "literal is too long" error.
	literalPrefixLength = 100
)
    82  
var (
	errNoDownsamplerAndWriter       = errors.New("no downsampler and writer set")
	errNoTagOptions                 = errors.New("no tag options set")
	errNoNowFn                      = errors.New("no now fn set")
	errUnaggregatedStoragePolicySet = errors.New("storage policy should not be set for unaggregated metrics")

	// Default forwarding retry policy when none is configured: a single
	// retry starting at a 2s backoff with jitter, not retrying forever.
	defaultForwardingRetryForever = false
	defaultForwardingRetryJitter  = true
	defaultForwardRetryConfig     = retry.Configuration{
		InitialBackoff: time.Second * 2,
		BackoffFactor:  2,
		MaxRetries:     1,
		Forever:        &defaultForwardingRetryForever,
		Jitter:         &defaultForwardingRetryJitter,
	}

	// defaultValue is the empty iterator value returned by promTSIter.Current
	// when the iterator index is out of range.
	defaultValue = ingest.IterValue{
		Tags:       models.EmptyTags(),
		Attributes: ts.DefaultSeriesAttributes(),
		Metadata:   ts.Metadata{},
	}

	// headerToMetricType maps lower-cased Prom type header values to the
	// corresponding protobuf metric type.
	headerToMetricType = map[string]prompb.MetricType{
		"counter":         prompb.MetricType_COUNTER,
		"gauge":           prompb.MetricType_GAUGE,
		"gauge_histogram": prompb.MetricType_GAUGE_HISTOGRAM,
		"histogram":       prompb.MetricType_HISTOGRAM,
		"info":            prompb.MetricType_INFO,
		"stateset":        prompb.MetricType_STATESET,
		"summary":         prompb.MetricType_SUMMARY,
	}
)
   115  
// PromWriteHandler represents a handler for prometheus write endpoint.
type PromWriteHandler struct {
	// downsamplerAndWriter performs the actual batch writes to storage.
	downsamplerAndWriter ingest.DownsamplerAndWriter
	// tagOptions supplies tag validation limits (e.g. max literal length).
	tagOptions models.TagOptions
	// storeMetricsType controls whether series attributes are marshalled
	// into annotations when iterating (see promTSIter.Next).
	storeMetricsType bool
	// forwarding holds the configured forwarding targets and concurrency.
	forwarding             handleroptions.PromWriteHandlerForwardingOptions
	forwardTimeout         time.Duration
	forwardHTTPClient      *http.Client
	forwardingBoundWorkers xsync.WorkerPool
	// forwardContext is the parent context for forwarded requests; it is
	// deliberately independent of the inbound request's context.
	forwardContext context.Context
	forwardRetrier retry.Retrier
	nowFn          clock.NowFn
	instrumentOpts instrument.Options
	metrics        promWriteMetrics

	// Counting the number of times of "literal is too long" error for log sampling purposes.
	numLiteralIsTooLong uint32
}
   134  
   135  // NewPromWriteHandler returns a new instance of handler.
   136  func NewPromWriteHandler(options options.HandlerOptions) (http.Handler, error) {
   137  	var (
   138  		downsamplerAndWriter = options.DownsamplerAndWriter()
   139  		tagOptions           = options.TagOptions()
   140  		nowFn                = options.NowFn()
   141  		forwarding           = options.Config().WriteForwarding.PromRemoteWrite
   142  		instrumentOpts       = options.InstrumentOpts()
   143  	)
   144  
   145  	if downsamplerAndWriter == nil {
   146  		return nil, errNoDownsamplerAndWriter
   147  	}
   148  
   149  	if tagOptions == nil {
   150  		return nil, errNoTagOptions
   151  	}
   152  
   153  	if nowFn == nil {
   154  		return nil, errNoNowFn
   155  	}
   156  
   157  	scope := options.InstrumentOpts().
   158  		MetricsScope().
   159  		Tagged(map[string]string{"handler": "remote-write"})
   160  	metrics, err := newPromWriteMetrics(scope)
   161  	if err != nil {
   162  		return nil, err
   163  	}
   164  
   165  	// Only use a forwarding worker pool if concurrency is bound, otherwise
   166  	// if unlimited we just spin up a goroutine for each incoming write.
   167  	var forwardingBoundWorkers xsync.WorkerPool
   168  	if v := forwarding.MaxConcurrency; v > 0 {
   169  		forwardingBoundWorkers = xsync.NewWorkerPool(v)
   170  		forwardingBoundWorkers.Init()
   171  	}
   172  
   173  	forwardTimeout := defaultForwardingTimeout
   174  	if v := forwarding.Timeout; v > 0 {
   175  		forwardTimeout = v
   176  	}
   177  
   178  	forwardHTTPOpts := xhttp.DefaultHTTPClientOptions()
   179  	forwardHTTPOpts.DisableCompression = true // Already snappy compressed.
   180  	forwardHTTPOpts.RequestTimeout = forwardTimeout
   181  
   182  	forwardRetryConfig := defaultForwardRetryConfig
   183  	if forwarding.Retry != nil {
   184  		forwardRetryConfig = *forwarding.Retry
   185  	}
   186  	forwardRetryOpts := forwardRetryConfig.NewOptions(
   187  		scope.SubScope("forwarding-retry"),
   188  	)
   189  
   190  	return &PromWriteHandler{
   191  		downsamplerAndWriter:   downsamplerAndWriter,
   192  		tagOptions:             tagOptions,
   193  		storeMetricsType:       options.StoreMetricsType(),
   194  		forwarding:             forwarding,
   195  		forwardTimeout:         forwardTimeout,
   196  		forwardHTTPClient:      xhttp.NewHTTPClient(forwardHTTPOpts),
   197  		forwardingBoundWorkers: forwardingBoundWorkers,
   198  		forwardContext:         context.Background(),
   199  		forwardRetrier:         retry.NewRetrier(forwardRetryOpts),
   200  		nowFn:                  nowFn,
   201  		metrics:                metrics,
   202  		instrumentOpts:         instrumentOpts,
   203  	}, nil
   204  }
   205  
// promWriteMetrics holds counters and histograms emitted by the remote
// write path: write success/error counts, batch and per-sample ingest
// latency, and forwarding outcomes.
type promWriteMetrics struct {
	writeSuccess             tally.Counter
	writeErrorsServer        tally.Counter
	writeErrorsClient        tally.Counter
	writeBatchLatency        tally.Histogram
	writeBatchLatencyBuckets tally.DurationBuckets
	ingestLatency            tally.Histogram
	ingestLatencyBuckets     tally.DurationBuckets
	forwardSuccess           tally.Counter
	forwardErrors            tally.Counter
	forwardDropped           tally.Counter
	forwardLatency           tally.Histogram
}
   219  
   220  func (m *promWriteMetrics) incError(err error) {
   221  	if xhttp.IsClientError(err) {
   222  		m.writeErrorsClient.Inc(1)
   223  	} else {
   224  		m.writeErrorsServer.Inc(1)
   225  	}
   226  }
   227  
// newPromWriteMetrics constructs the write-path metrics under the given scope.
// Write error counters are split by a "code" tag (4XX vs 5XX); latency
// histograms reuse the shared ingest latency bucket definitions.
func newPromWriteMetrics(scope tally.Scope) (promWriteMetrics, error) {
	buckets, err := ingest.NewLatencyBuckets()
	if err != nil {
		return promWriteMetrics{}, err
	}
	return promWriteMetrics{
		writeSuccess:             scope.SubScope("write").Counter("success"),
		writeErrorsServer:        scope.SubScope("write").Tagged(map[string]string{"code": "5XX"}).Counter("errors"),
		writeErrorsClient:        scope.SubScope("write").Tagged(map[string]string{"code": "4XX"}).Counter("errors"),
		writeBatchLatency:        scope.SubScope("write").Histogram("batch-latency", buckets.WriteLatencyBuckets),
		writeBatchLatencyBuckets: buckets.WriteLatencyBuckets,
		ingestLatency:            scope.SubScope("ingest").Histogram("latency", buckets.IngestLatencyBuckets),
		ingestLatencyBuckets:     buckets.IngestLatencyBuckets,
		forwardSuccess:           scope.SubScope("forward").Counter("success"),
		forwardErrors:            scope.SubScope("forward").Counter("errors"),
		forwardDropped:           scope.SubScope("forward").Counter("dropped"),
		forwardLatency:           scope.SubScope("forward").Histogram("latency", buckets.WriteLatencyBuckets),
	}, nil
}
   247  
// ServeHTTP handles a Prometheus remote write request: it parses and
// decompresses the payload, kicks off asynchronous forwarding of the
// still-compressed body to any configured targets, writes the samples via the
// downsampler-and-writer, and reports per-batch metrics and a classified
// error response on failure.
func (h *PromWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	batchRequestStopwatch := h.metrics.writeBatchLatency.Start()
	defer batchRequestStopwatch.Stop()

	checkedReq, err := h.checkedParseRequest(r)
	if err != nil {
		h.metrics.incError(err)
		xhttp.WriteError(w, err)
		return
	}

	var (
		req    = checkedReq.Request
		opts   = checkedReq.Options
		result = checkedReq.CompressResult
	)
	// Begin async forwarding.
	// NB(r): Be careful about not returning buffers to pool
	// if the request bodies ever get pooled until after
	// forwarding completes.
	// NOTE(review): the forward closures capture r.Header and may run after
	// this handler returns — confirm the server never pools/reuses the
	// request object while forwards are in flight.
	if targets := h.forwarding.Targets; len(targets) > 0 {
		for _, target := range targets {
			target := target // Capture for lambda.
			forward := func() {
				now := h.nowFn()
				err := h.forwardRetrier.Attempt(func() error {
					// Consider propagating baggage without tying
					// context to request context in future.
					ctx, cancel := context.WithTimeout(h.forwardContext, h.forwardTimeout)
					defer cancel()
					return h.forward(ctx, result, r.Header, target)
				})

				// Record forward ingestion delay.
				// NB: this includes any time for retries.
				for _, series := range req.Timeseries {
					for _, sample := range series.Samples {
						age := now.Sub(storage.PromTimestampToTime(sample.Timestamp))
						h.metrics.forwardLatency.RecordDuration(age)
					}
				}

				if err != nil {
					h.metrics.forwardErrors.Inc(1)
					logger := logging.WithContext(h.forwardContext, h.instrumentOpts)
					logger.Error("forward error", zap.Error(err))
					return
				}

				h.metrics.forwardSuccess.Inc(1)
			}

			// With bound concurrency the forward is dropped (not queued) when
			// no worker is free; with unbound concurrency a goroutine is
			// spawned per forward.
			spawned := false
			if h.forwarding.MaxConcurrency > 0 {
				spawned = h.forwardingBoundWorkers.GoIfAvailable(forward)
			} else {
				go forward()
				spawned = true
			}
			if !spawned {
				h.metrics.forwardDropped.Inc(1)
			}
		}
	}

	batchErr := h.write(r.Context(), req, opts)

	// Record ingestion delay latency
	now := h.nowFn()
	for _, series := range req.Timeseries {
		for _, sample := range series.Samples {
			age := now.Sub(storage.PromTimestampToTime(sample.Timestamp))
			h.metrics.ingestLatency.RecordDuration(age)
		}
	}

	if batchErr != nil {
		// Classify every error in the batch so the response status reflects
		// the whole batch: all-bad-request => 400, any resource exhaustion
		// (when not all errors are bad requests) => 429, otherwise 500.
		var (
			errs                 = batchErr.Errors()
			lastRegularErr       string
			lastBadRequestErr    string
			numRegular           int
			numBadRequest        int
			numResourceExhausted int
		)
		for _, err := range errs {
			switch {
			case client.IsResourceExhaustedError(err):
				numResourceExhausted++
				lastBadRequestErr = err.Error()
			case client.IsBadRequestError(err):
				numBadRequest++
				lastBadRequestErr = err.Error()
			case xerrors.IsInvalidParams(err):
				numBadRequest++
				lastBadRequestErr = err.Error()
			default:
				numRegular++
				lastRegularErr = err.Error()
			}
		}

		var status int
		switch {
		case numBadRequest == len(errs):
			status = http.StatusBadRequest
		case numResourceExhausted > 0:
			status = http.StatusTooManyRequests
		default:
			status = http.StatusInternalServerError
		}

		logger := logging.WithContext(r.Context(), h.instrumentOpts)
		logger.Error("write error",
			zap.String("remoteAddr", r.RemoteAddr),
			zap.Int("httpResponseStatusCode", status),
			zap.Int("numResourceExhaustedErrors", numResourceExhausted),
			zap.Int("numRegularErrors", numRegular),
			zap.Int("numBadRequestErrors", numBadRequest),
			zap.String("lastRegularError", lastRegularErr),
			zap.String("lastBadRequestErr", lastBadRequestErr))

		// Build a summary message carrying the counts and last instance of
		// each error class for the response body.
		var resultErrMessage string
		if lastRegularErr != "" {
			resultErrMessage = fmt.Sprintf("retryable_errors: count=%d, last=%s",
				numRegular, lastRegularErr)
		}
		if lastBadRequestErr != "" {
			var sep string
			if lastRegularErr != "" {
				sep = ", "
			}
			resultErrMessage = fmt.Sprintf("%s%sbad_request_errors: count=%d, last=%s",
				resultErrMessage, sep, numBadRequest, lastBadRequestErr)
		}

		resultError := xhttp.NewError(errors.New(resultErrMessage), status)
		h.metrics.incError(resultError)
		xhttp.WriteError(w, resultError)
		return
	}

	// NB(schallert): this is frustrating but if we don't explicitly write an HTTP
	// status code (or via Write()), OpenTracing middleware reports code=0 and
	// shows up as error.
	w.WriteHeader(200)
	h.metrics.writeSuccess.Inc(1)
}
   396  
// parseRequestResult bundles the outcome of parsing a remote write request:
// the decoded protobuf request, the ingest options derived from headers, and
// the compressed/uncompressed body (the compressed body is reused verbatim
// when forwarding to other coordinators).
type parseRequestResult struct {
	Request        *prompb.WriteRequest
	Options        ingest.WriteOptions
	CompressResult prometheus.ParsePromCompressedRequestResult
}
   402  
   403  func (h *PromWriteHandler) checkedParseRequest(
   404  	r *http.Request,
   405  ) (parseRequestResult, error) {
   406  	result, err := h.parseRequest(r)
   407  	if err != nil {
   408  		// Always invalid request if parsing fails params.
   409  		return parseRequestResult{}, xerrors.NewInvalidParamsError(err)
   410  	}
   411  	return result, nil
   412  }
   413  
// parseRequest extracts the Prometheus write request from the request body and
// headers. WARNING: it is not guaranteed that the tags returned in the request
// body are in sorted order. It is expected that the caller ensures the tags are
// sorted before passing them to storage, which currently happens in write() ->
// newTSPromIter() -> storage.PromLabelsToM3Tags() -> tags.AddTags(). This is
// the only path written metrics are processed, but future write paths must
// uphold the same guarantees.
func (h *PromWriteHandler) parseRequest(
	r *http.Request,
) (parseRequestResult, error) {
	var opts ingest.WriteOptions
	if v := strings.TrimSpace(r.Header.Get(headers.MetricsTypeHeader)); v != "" {
		// Allow the metrics type and storage policies to override
		// the default rules and policies if specified.
		metricsType, err := storagemetadata.ParseMetricsType(v)
		if err != nil {
			return parseRequestResult{}, err
		}

		// Ensure ingest options specify we are overriding the
		// downsampling rules with zero rules to be applied (so
		// only direct writes will be made).
		opts.DownsampleOverride = true
		opts.DownsampleMappingRules = nil

		strPolicy := strings.TrimSpace(r.Header.Get(headers.MetricsStoragePolicyHeader))
		switch metricsType {
		case storagemetadata.UnaggregatedMetricsType:
			// A storage policy makes no sense for unaggregated writes and is
			// rejected outright.
			if strPolicy != emptyStoragePolicyVar {
				return parseRequestResult{}, errUnaggregatedStoragePolicySet
			}
		default:
			parsed, err := policy.ParseStoragePolicy(strPolicy)
			if err != nil {
				err = fmt.Errorf("could not parse storage policy: %v", err)
				return parseRequestResult{}, err
			}

			// Make sure this specific storage policy is used for the writes.
			opts.WriteOverride = true
			opts.WriteStoragePolicies = policy.StoragePolicies{
				parsed,
			}
		}
	}
	if v := strings.TrimSpace(r.Header.Get(headers.WriteTypeHeader)); v != "" {
		switch v {
		case headers.DefaultWriteType:
		case headers.AggregateWriteType:
			// Aggregate write type overrides with an empty policy list —
			// presumably suppressing the direct/unaggregated write; confirm
			// against downsampler-and-writer semantics.
			opts.WriteOverride = true
			opts.WriteStoragePolicies = policy.StoragePolicies{}
		default:
			err := fmt.Errorf("unrecognized write type: %s", v)
			return parseRequestResult{}, err
		}
	}

	// Decompress the snappy body then decode the protobuf write request.
	result, err := prometheus.ParsePromCompressedRequest(r)
	if err != nil {
		return parseRequestResult{}, err
	}

	var req prompb.WriteRequest
	if err := proto.Unmarshal(result.UncompressedBody, &req); err != nil {
		return parseRequestResult{}, err
	}

	// Optionally rewrite tags according to a JSON mapping carried in a header.
	if mapStr := r.Header.Get(headers.MapTagsByJSONHeader); mapStr != "" {
		var opts handleroptions.MapTagsOptions
		if err := json.Unmarshal([]byte(mapStr), &opts); err != nil {
			return parseRequestResult{}, err
		}

		if err := mapTags(&req, opts); err != nil {
			return parseRequestResult{}, err
		}
	}

	// Optionally stamp every series in the batch with an explicit Prometheus
	// metric type from the request header.
	if promType := r.Header.Get(headers.PromTypeHeader); promType != "" {
		tp, ok := headerToMetricType[strings.ToLower(promType)]
		if !ok {
			return parseRequestResult{}, fmt.Errorf("unknown prom metric type %s", promType)
		}
		for i := range req.Timeseries {
			req.Timeseries[i].Type = tp
		}
	}

	// Check if any of the labels exceed literal length limits and occasionally print them
	// in a log message for debugging purposes.
	maxTagLiteralLength := int(h.tagOptions.MaxTagLiteralLength())
	for _, ts := range req.Timeseries {
		for _, l := range ts.Labels {
			if len(l.Name) > maxTagLiteralLength || len(l.Value) > maxTagLiteralLength {
				h.maybeLogLabelsWithTooLongLiterals(h.instrumentOpts.Logger(), l)
				err := fmt.Errorf("label literal is too long: nameLength=%d, valueLength=%d, maxLength=%d",
					len(l.Name), len(l.Value), maxTagLiteralLength)
				return parseRequestResult{}, err
			}
		}
	}

	return parseRequestResult{
		Request:        &req,
		Options:        opts,
		CompressResult: result,
	}, nil
}
   522  
   523  func (h *PromWriteHandler) write(
   524  	ctx context.Context,
   525  	r *prompb.WriteRequest,
   526  	opts ingest.WriteOptions,
   527  ) ingest.BatchError {
   528  	iter, err := newPromTSIter(r.Timeseries, h.tagOptions, h.storeMetricsType)
   529  	if err != nil {
   530  		var errs xerrors.MultiError
   531  		return errs.Add(err)
   532  	}
   533  	return h.downsamplerAndWriter.WriteBatch(ctx, iter, opts)
   534  }
   535  
   536  func (h *PromWriteHandler) forward(
   537  	ctx context.Context,
   538  	request prometheus.ParsePromCompressedRequestResult,
   539  	header http.Header,
   540  	target handleroptions.PromWriteHandlerForwardTargetOptions,
   541  ) error {
   542  	method := target.Method
   543  	if method == "" {
   544  		method = http.MethodPost
   545  	}
   546  	url := target.URL
   547  	req, err := http.NewRequest(method, url, bytes.NewReader(request.CompressedBody))
   548  	if err != nil {
   549  		return err
   550  	}
   551  
   552  	// There are multiple headers that impact coordinator behavior on the write
   553  	// (map tags, storage policy, etc.) that we must forward to the target
   554  	// coordinator to guarantee same behavior as the coordinator that originally
   555  	// received the request.
   556  	if header != nil {
   557  		for h := range header {
   558  			if strings.HasPrefix(h, headers.M3HeaderPrefix) {
   559  				req.Header.Add(h, header.Get(h))
   560  			}
   561  		}
   562  	}
   563  
   564  	if targetHeaders := target.Headers; targetHeaders != nil {
   565  		// If headers set, attach to request.
   566  		for name, value := range targetHeaders {
   567  			req.Header.Add(name, value)
   568  		}
   569  	}
   570  
   571  	resp, err := h.forwardHTTPClient.Do(req.WithContext(ctx))
   572  	if err != nil {
   573  		return err
   574  	}
   575  
   576  	defer resp.Body.Close()
   577  
   578  	if resp.StatusCode/100 != 2 {
   579  		response, err := ioutil.ReadAll(resp.Body)
   580  		if err != nil {
   581  			response = []byte(fmt.Sprintf("error reading body: %v", err))
   582  		}
   583  		return fmt.Errorf("expected status code 2XX: actual=%v, method=%v, url=%v, resp=%s",
   584  			resp.StatusCode, method, url, response)
   585  	}
   586  
   587  	return nil
   588  }
   589  
   590  func (h *PromWriteHandler) maybeLogLabelsWithTooLongLiterals(logger *zap.Logger, label prompb.Label) {
   591  	if atomic.AddUint32(&h.numLiteralIsTooLong, 1) > maxLiteralIsTooLongLogCount {
   592  		return
   593  	}
   594  
   595  	safePrefix := func(b []byte, l int) []byte {
   596  		if len(b) <= l {
   597  			return b
   598  		}
   599  		return b[:l]
   600  	}
   601  
   602  	logger.Warn("label exceeds literal length limits",
   603  		zap.String("namePrefix", string(safePrefix(label.Name, literalPrefixLength))),
   604  		zap.Int("nameLength", len(label.Name)),
   605  		zap.String("valuePrefix", string(safePrefix(label.Value, literalPrefixLength))),
   606  		zap.Int("valueLength", len(label.Value)),
   607  	)
   608  }
   609  
   610  func newPromTSIter(
   611  	timeseries []prompb.TimeSeries,
   612  	tagOpts models.TagOptions,
   613  	storeMetricsType bool,
   614  ) (*promTSIter, error) {
   615  	// Construct the tags and datapoints upfront so that if the iterator
   616  	// is reset, we don't have to generate them twice.
   617  	var (
   618  		tags             = make([]models.Tags, 0, len(timeseries))
   619  		datapoints       = make([]ts.Datapoints, 0, len(timeseries))
   620  		seriesAttributes = make([]ts.SeriesAttributes, 0, len(timeseries))
   621  	)
   622  
   623  	graphiteTagOpts := tagOpts.SetIDSchemeType(models.TypeGraphite)
   624  	for _, promTS := range timeseries {
   625  		attributes, err := storage.PromTimeSeriesToSeriesAttributes(promTS)
   626  		if err != nil {
   627  			return nil, err
   628  		}
   629  
   630  		// Set the tag options based on the incoming source.
   631  		opts := tagOpts
   632  		if attributes.Source == ts.SourceTypeGraphite {
   633  			opts = graphiteTagOpts
   634  		}
   635  
   636  		seriesAttributes = append(seriesAttributes, attributes)
   637  		tags = append(tags, storage.PromLabelsToM3Tags(promTS.Labels, opts))
   638  		datapoints = append(datapoints, storage.PromSamplesToM3Datapoints(promTS.Samples))
   639  	}
   640  
   641  	return &promTSIter{
   642  		attributes:       seriesAttributes,
   643  		idx:              -1,
   644  		tags:             tags,
   645  		datapoints:       datapoints,
   646  		storeMetricsType: storeMetricsType,
   647  	}, nil
   648  }
   649  
// promTSIter iterates over pre-materialized per-series tags, datapoints and
// attributes for a batch of Prometheus time series. idx starts at -1 and is
// advanced by Next; annotation holds the marshalled attributes for the
// current series when storeMetricsType is enabled.
type promTSIter struct {
	idx        int
	err        error
	attributes []ts.SeriesAttributes
	tags       []models.Tags
	datapoints []ts.Datapoints
	// metadatas is allocated lazily by SetCurrentMetadata.
	metadatas  []ts.Metadata
	annotation []byte

	storeMetricsType bool
}
   661  
   662  func (i *promTSIter) Next() bool {
   663  	if i.err != nil {
   664  		return false
   665  	}
   666  
   667  	i.idx++
   668  	if i.idx >= len(i.tags) {
   669  		return false
   670  	}
   671  
   672  	if !i.storeMetricsType {
   673  		return true
   674  	}
   675  
   676  	annotationPayload, err := storage.SeriesAttributesToAnnotationPayload(i.attributes[i.idx])
   677  	if err != nil {
   678  		i.err = err
   679  		return false
   680  	}
   681  
   682  	i.annotation, err = annotationPayload.Marshal()
   683  	if err != nil {
   684  		i.err = err
   685  		return false
   686  	}
   687  
   688  	if len(i.annotation) == 0 {
   689  		i.annotation = nil
   690  	}
   691  
   692  	return true
   693  }
   694  
   695  func (i *promTSIter) Current() ingest.IterValue {
   696  	if len(i.tags) == 0 || i.idx < 0 || i.idx >= len(i.tags) {
   697  		return defaultValue
   698  	}
   699  
   700  	value := ingest.IterValue{
   701  		Tags:       i.tags[i.idx],
   702  		Datapoints: i.datapoints[i.idx],
   703  		Attributes: i.attributes[i.idx],
   704  		Unit:       xtime.Millisecond,
   705  		Annotation: i.annotation,
   706  	}
   707  	if i.idx < len(i.metadatas) {
   708  		value.Metadata = i.metadatas[i.idx]
   709  	}
   710  	return value
   711  }
   712  
   713  func (i *promTSIter) Reset() error {
   714  	i.idx = -1
   715  	i.err = nil
   716  	i.annotation = nil
   717  
   718  	return nil
   719  }
   720  
// Error returns the error recorded during iteration, if any.
func (i *promTSIter) Error() error {
	return i.err
}
   724  
   725  func (i *promTSIter) SetCurrentMetadata(metadata ts.Metadata) {
   726  	if len(i.metadatas) == 0 {
   727  		i.metadatas = make([]ts.Metadata, len(i.tags))
   728  	}
   729  	if i.idx < 0 || i.idx >= len(i.metadatas) {
   730  		return
   731  	}
   732  	i.metadatas[i.idx] = metadata
   733  }