github.com/thanos-io/thanos@v0.32.5/internal/cortex/querier/queryrange/query_range.go (about)

     1  // Copyright (c) The Cortex Authors.
     2  // Licensed under the Apache License 2.0.
     3  
     4  package queryrange
     5  
     6  import (
     7  	"bytes"
     8  	"context"
     9  	stdjson "encoding/json"
    10  	"fmt"
    11  	"io/ioutil"
    12  	"math"
    13  	"net/http"
    14  	"net/url"
    15  	"sort"
    16  	"strconv"
    17  	"strings"
    18  	"time"
    19  	"unsafe"
    20  
    21  	"github.com/gogo/protobuf/proto"
    22  	"github.com/gogo/status"
    23  	jsoniter "github.com/json-iterator/go"
    24  	"github.com/opentracing/opentracing-go"
    25  	otlog "github.com/opentracing/opentracing-go/log"
    26  	"github.com/pkg/errors"
    27  	"github.com/prometheus/common/model"
    28  	"github.com/prometheus/prometheus/model/timestamp"
    29  	"github.com/weaveworks/common/httpgrpc"
    30  
    31  	"github.com/thanos-io/thanos/internal/cortex/cortexpb"
    32  	"github.com/thanos-io/thanos/internal/cortex/util"
    33  	"github.com/thanos-io/thanos/internal/cortex/util/spanlogger"
    34  )
    35  
// StatusSuccess Prometheus success result.
const StatusSuccess = "success"

var (
	// matrix is the canonical name of the Prometheus matrix result type.
	matrix = model.ValMatrix.String()
	// json is the frozen jsoniter config used for all (de)serialization in this package.
	json   = jsoniter.Config{
		EscapeHTML:             false, // No HTML in our responses.
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
	}.Froze()
	// Client-facing errors for invalid query range parameters.
	errEndBeforeStart = httpgrpc.Errorf(http.StatusBadRequest, "end timestamp must not be before start time")
	errNegativeStep   = httpgrpc.Errorf(http.StatusBadRequest, "zero or negative query resolution step widths are not accepted. Try a positive integer")
	errStepTooSmall   = httpgrpc.Errorf(http.StatusBadRequest, "exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")

	// PrometheusCodec is a codec to encode and decode Prometheus query range requests and responses.
	PrometheusCodec Codec = &prometheusCodec{}

	// Name of the cache control header.
	cacheControlHeader = "Cache-Control"
)
    56  
// Codec is used to encode/decode query range requests and responses so they can be passed down to middlewares.
type Codec interface {
	Merger
	// DecodeRequest decodes a Request from an http request.
	DecodeRequest(_ context.Context, request *http.Request, forwardHeaders []string) (Request, error)
	// DecodeResponse decodes a Response from an http response.
	// The original request is also passed as a parameter; this is useful for implementations that need the request
	// to merge the result or build the result correctly.
	DecodeResponse(context.Context, *http.Response, Request) (Response, error)
	// EncodeRequest encodes a Request into an http request.
	EncodeRequest(context.Context, Request) (*http.Request, error)
	// EncodeResponse encodes a Response into an http response.
	EncodeResponse(context.Context, Response) (*http.Response, error)
}
    71  
// Merger is used by middlewares making multiple requests to merge back all responses into a single one.
type Merger interface {
	// MergeResponse merges responses from multiple requests into a single Response.
	// The Request argument gives implementations access to the original query when building the merged result.
	MergeResponse(Request, ...Response) (Response, error)
}
    77  
// Request represents a query range request that can be processed by middlewares.
type Request interface {
	// GetStart returns the start timestamp of the request in milliseconds.
	GetStart() int64
	// GetEnd returns the end timestamp of the request in milliseconds.
	GetEnd() int64
	// GetStep returns the step of the request in milliseconds.
	GetStep() int64
	// GetQuery returns the query of the request.
	GetQuery() string
	// GetCachingOptions returns the caching options.
	GetCachingOptions() CachingOptions
	// WithStartEnd clones the current request with different start and end timestamps (milliseconds).
	WithStartEnd(startTime int64, endTime int64) Request
	// WithQuery clones the current request with a different query.
	WithQuery(string) Request
	proto.Message
	// LogToSpan writes information about this request to an OpenTracing span.
	LogToSpan(opentracing.Span)
	// GetStats returns the stats of the request.
	GetStats() string
	// WithStats clones the current `PrometheusRequest` with a new stats.
	WithStats(stats string) Request
}
   102  
// Response represents a query range response and is implemented by both the
// range and instant query response types.
type Response interface {
	proto.Message
	// GetHeaders returns the HTTP headers in the response.
	GetHeaders() []*PrometheusResponseHeader
	// GetStats returns the Prometheus query stats in the response.
	GetStats() *PrometheusResponseStats
}
   111  
   112  type prometheusCodec struct{}
   113  
   114  // WithStartEnd clones the current `PrometheusRequest` with a new `start` and `end` timestamp.
   115  func (q *PrometheusRequest) WithStartEnd(start int64, end int64) Request {
   116  	new := *q
   117  	new.Start = start
   118  	new.End = end
   119  	return &new
   120  }
   121  
   122  // WithQuery clones the current `PrometheusRequest` with a new query.
   123  func (q *PrometheusRequest) WithQuery(query string) Request {
   124  	new := *q
   125  	new.Query = query
   126  	return &new
   127  }
   128  
   129  // WithStats clones the current `PrometheusRequest` with a new stats.
   130  func (q *PrometheusRequest) WithStats(stats string) Request {
   131  	new := *q
   132  	new.Stats = stats
   133  	return &new
   134  }
   135  
// LogToSpan logs the current `PrometheusRequest` parameters to the specified span.
func (q *PrometheusRequest) LogToSpan(sp opentracing.Span) {
	sp.LogFields(
		otlog.String("query", q.GetQuery()),
		// Start/end are stored in milliseconds; render them as readable timestamps.
		otlog.String("start", timestamp.Time(q.GetStart()).String()),
		otlog.String("end", timestamp.Time(q.GetEnd()).String()),
		otlog.Int64("step (ms)", q.GetStep()),
	)
}
   145  
// byFirstTime sorts PrometheusResponses by the timestamp of their earliest
// data point; responses without data (minTime() == -1) sort first.
type byFirstTime []*PrometheusResponse

func (a byFirstTime) Len() int           { return len(a) }
func (a byFirstTime) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byFirstTime) Less(i, j int) bool { return a[i].minTime() < a[j].minTime() }
   151  
   152  func (resp *PrometheusResponse) minTime() int64 {
   153  	result := resp.Data.Result
   154  	if len(result) == 0 {
   155  		return -1
   156  	}
   157  	if len(result[0].Samples) == 0 && len(result[0].Histograms) == 0 {
   158  		return -1
   159  	}
   160  
   161  	if len(result[0].Samples) == 0 {
   162  		return result[0].Histograms[0].Timestamp
   163  	}
   164  
   165  	if len(result[0].Histograms) == 0 {
   166  		return result[0].Samples[0].TimestampMs
   167  	}
   168  
   169  	return minInt64(result[0].Samples[0].TimestampMs, result[0].Histograms[0].Timestamp)
   170  }
   171  
   172  func minInt64(a, b int64) int64 {
   173  	if a < b {
   174  		return a
   175  	}
   176  	return b
   177  }
   178  
// GetStats returns the Prometheus query stats embedded in the range response data.
func (resp *PrometheusResponse) GetStats() *PrometheusResponseStats {
	return resp.Data.Stats
}
   182  
// GetStats returns the Prometheus query stats embedded in the instant query response data.
func (resp *PrometheusInstantQueryResponse) GetStats() *PrometheusResponseStats {
	return resp.Data.Stats
}
   186  
   187  // NewEmptyPrometheusResponse returns an empty successful Prometheus query range response.
   188  func NewEmptyPrometheusResponse() *PrometheusResponse {
   189  	return &PrometheusResponse{
   190  		Status: StatusSuccess,
   191  		Data: PrometheusData{
   192  			ResultType: model.ValMatrix.String(),
   193  			Result:     []SampleStream{},
   194  		},
   195  	}
   196  }
   197  
   198  // NewEmptyPrometheusInstantQueryResponse returns an empty successful Prometheus query range response.
   199  func NewEmptyPrometheusInstantQueryResponse() *PrometheusInstantQueryResponse {
   200  	return &PrometheusInstantQueryResponse{
   201  		Status: StatusSuccess,
   202  		Data: PrometheusInstantQueryData{
   203  			ResultType: model.ValVector.String(),
   204  			Result: PrometheusInstantQueryResult{
   205  				Result: &PrometheusInstantQueryResult_Vector{},
   206  			},
   207  		},
   208  	}
   209  }
   210  
   211  func (prometheusCodec) MergeResponse(_ Request, responses ...Response) (Response, error) {
   212  	if len(responses) == 0 {
   213  		return NewEmptyPrometheusResponse(), nil
   214  	}
   215  
   216  	promResponses := make([]*PrometheusResponse, 0, len(responses))
   217  	// we need to pass on all the headers for results cache gen numbers.
   218  	var resultsCacheGenNumberHeaderValues []string
   219  
   220  	for _, res := range responses {
   221  		promResponses = append(promResponses, res.(*PrometheusResponse))
   222  		resultsCacheGenNumberHeaderValues = append(resultsCacheGenNumberHeaderValues, getHeaderValuesWithName(res, ResultsCacheGenNumberHeaderName)...)
   223  	}
   224  
   225  	// Merge the responses.
   226  	sort.Sort(byFirstTime(promResponses))
   227  
   228  	var explanation *Explanation
   229  	for i := range promResponses {
   230  		if promResponses[i].Data.GetExplanation() != nil {
   231  			explanation = promResponses[i].Data.GetExplanation()
   232  			break
   233  		}
   234  	}
   235  
   236  	response := PrometheusResponse{
   237  		Status: StatusSuccess,
   238  		Data: PrometheusData{
   239  			ResultType:  model.ValMatrix.String(),
   240  			Result:      matrixMerge(promResponses),
   241  			Stats:       StatsMerge(responses),
   242  			Explanation: explanation,
   243  		},
   244  	}
   245  
   246  	if len(resultsCacheGenNumberHeaderValues) != 0 {
   247  		response.Headers = []*PrometheusResponseHeader{{
   248  			Name:   ResultsCacheGenNumberHeaderName,
   249  			Values: resultsCacheGenNumberHeaderValues,
   250  		}}
   251  	}
   252  
   253  	return &response, nil
   254  }
   255  
// DecodeRequest parses a Prometheus range query HTTP request into a
// PrometheusRequest. The validation order (start, end, end>=start, step,
// resolution limit) is deliberate — it determines which error a client sees
// first. Headers named in forwardHeaders are copied onto the request.
func (prometheusCodec) DecodeRequest(_ context.Context, r *http.Request, forwardHeaders []string) (Request, error) {
	var result PrometheusRequest
	var err error
	result.Start, err = util.ParseTime(r.FormValue("start"))
	if err != nil {
		return nil, decorateWithParamName(err, "start")
	}

	result.End, err = util.ParseTime(r.FormValue("end"))
	if err != nil {
		return nil, decorateWithParamName(err, "end")
	}

	if result.End < result.Start {
		return nil, errEndBeforeStart
	}

	result.Step, err = parseDurationMs(r.FormValue("step"))
	if err != nil {
		return nil, decorateWithParamName(err, "step")
	}

	if result.Step <= 0 {
		return nil, errNegativeStep
	}

	// For safety, limit the number of returned points per timeseries.
	// This is sufficient for 60s resolution for a week or 1h resolution for a year.
	if (result.End-result.Start)/result.Step > 11000 {
		return nil, errStepTooSmall
	}

	result.Query = r.FormValue("query")
	result.Stats = r.FormValue("stats")
	result.Path = r.URL.Path

	// Include the specified headers from http request in prometheusRequest.
	// Matching is case-insensitive; only the first matching header key is copied.
	for _, header := range forwardHeaders {
		for h, hv := range r.Header {
			if strings.EqualFold(h, header) {
				result.Headers = append(result.Headers, &PrometheusRequestHeader{Name: h, Values: hv})
				break
			}
		}
	}

	// A Cache-Control header containing no-store disables caching for this request.
	for _, value := range r.Header.Values(cacheControlHeader) {
		if strings.Contains(value, noStoreValue) {
			result.CachingOptions.Disabled = true
			break
		}
	}

	return &result, nil
}
   311  
   312  func (prometheusCodec) EncodeRequest(ctx context.Context, r Request) (*http.Request, error) {
   313  	promReq, ok := r.(*PrometheusRequest)
   314  	if !ok {
   315  		return nil, httpgrpc.Errorf(http.StatusBadRequest, "invalid request format")
   316  	}
   317  	params := url.Values{
   318  		"start": []string{encodeTime(promReq.Start)},
   319  		"end":   []string{encodeTime(promReq.End)},
   320  		"step":  []string{encodeDurationMs(promReq.Step)},
   321  		"query": []string{promReq.Query},
   322  		"stats": []string{promReq.Stats},
   323  	}
   324  	u := &url.URL{
   325  		Path:     promReq.Path,
   326  		RawQuery: params.Encode(),
   327  	}
   328  	var h = http.Header{}
   329  
   330  	for _, hv := range promReq.Headers {
   331  		for _, v := range hv.Values {
   332  			h.Add(hv.Name, v)
   333  		}
   334  	}
   335  
   336  	req := &http.Request{
   337  		Method:     "GET",
   338  		RequestURI: u.String(), // This is what the httpgrpc code looks at.
   339  		URL:        u,
   340  		Body:       http.NoBody,
   341  		Header:     h,
   342  	}
   343  
   344  	return req.WithContext(ctx), nil
   345  }
   346  
   347  func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ Request) (Response, error) {
   348  	if r.StatusCode/100 != 2 {
   349  		body, _ := ioutil.ReadAll(r.Body)
   350  		return nil, httpgrpc.Errorf(r.StatusCode, string(body))
   351  	}
   352  	log, ctx := spanlogger.New(ctx, "ParseQueryRangeResponse") //nolint:ineffassign,staticcheck
   353  	defer log.Finish()
   354  
   355  	buf, err := BodyBuffer(r)
   356  	if err != nil {
   357  		log.Error(err)
   358  		return nil, err
   359  	}
   360  	log.LogFields(otlog.Int("bytes", len(buf)))
   361  
   362  	var resp PrometheusResponse
   363  	if err := json.Unmarshal(buf, &resp); err != nil {
   364  		return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err)
   365  	}
   366  
   367  	for h, hv := range r.Header {
   368  		resp.Headers = append(resp.Headers, &PrometheusResponseHeader{Name: h, Values: hv})
   369  	}
   370  	return &resp, nil
   371  }
   372  
// Buffer can be used to read a response body.
// This allows to avoid reading the body multiple times from the `http.Response.Body`.
type Buffer interface {
	// Bytes returns the already-buffered body bytes.
	Bytes() []byte
}
   378  
// BodyBuffer returns the full response body as a byte slice, reusing an
// already-buffered body when the frontend has wrapped it in a Buffer.
func BodyBuffer(res *http.Response) ([]byte, error) {
	// Attempt to cast the response body to a Buffer and use it if possible.
	// This is because the frontend may have already read the body and buffered it.
	if buffer, ok := res.Body.(Buffer); ok {
		return buffer.Bytes(), nil
	}
	// Preallocate the buffer with the exact size so we don't waste allocations
	// while progressively growing an initial small buffer. The buffer capacity
	// is increased by MinRead to avoid extra allocations due to how ReadFrom()
	// internally works. (MinRead also keeps the capacity positive when
	// ContentLength is -1, i.e. unknown.)
	buf := bytes.NewBuffer(make([]byte, 0, res.ContentLength+bytes.MinRead))
	if _, err := buf.ReadFrom(res.Body); err != nil {
		return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err)
	}
	return buf.Bytes(), nil
}
   395  
   396  func (prometheusCodec) EncodeResponse(ctx context.Context, res Response) (*http.Response, error) {
   397  	sp, _ := opentracing.StartSpanFromContext(ctx, "APIResponse.ToHTTPResponse")
   398  	defer sp.Finish()
   399  
   400  	a, ok := res.(*PrometheusResponse)
   401  	if !ok {
   402  		return nil, httpgrpc.Errorf(http.StatusInternalServerError, "invalid response format")
   403  	}
   404  
   405  	sp.LogFields(otlog.Int("series", len(a.Data.Result)))
   406  
   407  	b, err := json.Marshal(a)
   408  	if err != nil {
   409  		return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error encoding response: %v", err)
   410  	}
   411  
   412  	sp.LogFields(otlog.Int("bytes", len(b)))
   413  
   414  	resp := http.Response{
   415  		Header: http.Header{
   416  			"Content-Type": []string{"application/json"},
   417  		},
   418  		Body:          ioutil.NopCloser(bytes.NewBuffer(b)),
   419  		StatusCode:    http.StatusOK,
   420  		ContentLength: int64(len(b)),
   421  	}
   422  	return &resp, nil
   423  }
   424  
   425  // UnmarshalJSON implements json.Unmarshaler and is used for unmarshalling
   426  // a Prometheus range query response (matrix).
   427  func (s *SampleStream) UnmarshalJSON(data []byte) error {
   428  	var sampleStream model.SampleStream
   429  	if err := json.Unmarshal(data, &sampleStream); err != nil {
   430  		return err
   431  	}
   432  
   433  	s.Labels = cortexpb.FromMetricsToLabelAdapters(sampleStream.Metric)
   434  
   435  	if len(sampleStream.Values) > 0 {
   436  		s.Samples = make([]cortexpb.Sample, 0, len(sampleStream.Values))
   437  		for _, sample := range sampleStream.Values {
   438  			s.Samples = append(s.Samples, cortexpb.Sample{
   439  				Value:       float64(sample.Value),
   440  				TimestampMs: int64(sample.Timestamp),
   441  			})
   442  		}
   443  	}
   444  
   445  	if len(sampleStream.Histograms) > 0 {
   446  		s.Histograms = make([]SampleHistogramPair, 0, len(sampleStream.Histograms))
   447  		for _, h := range sampleStream.Histograms {
   448  			s.Histograms = append(s.Histograms, fromModelSampleHistogramPair(h))
   449  		}
   450  	}
   451  
   452  	return nil
   453  }
   454  
   455  // MarshalJSON implements json.Marshaler.
   456  func (s *SampleStream) MarshalJSON() ([]byte, error) {
   457  	var sampleStream model.SampleStream
   458  	sampleStream.Metric = cortexpb.FromLabelAdaptersToMetric(s.Labels)
   459  
   460  	sampleStream.Values = make([]model.SamplePair, 0, len(s.Samples))
   461  	for _, sample := range s.Samples {
   462  		sampleStream.Values = append(sampleStream.Values, model.SamplePair{
   463  			Value:     model.SampleValue(sample.Value),
   464  			Timestamp: model.Time(sample.TimestampMs),
   465  		})
   466  	}
   467  
   468  	sampleStream.Histograms = make([]model.SampleHistogramPair, 0, len(s.Histograms))
   469  	for _, h := range s.Histograms {
   470  		sampleStream.Histograms = append(sampleStream.Histograms, toModelSampleHistogramPair(h))
   471  	}
   472  
   473  	return json.Marshal(sampleStream)
   474  }
   475  
   476  // UnmarshalJSON implements json.Unmarshaler and is used for unmarshalling
   477  // a Prometheus instant query response (vector).
   478  func (s *Sample) UnmarshalJSON(data []byte) error {
   479  	var sample model.Sample
   480  	if err := json.Unmarshal(data, &sample); err != nil {
   481  		return err
   482  	}
   483  	s.Labels = cortexpb.FromMetricsToLabelAdapters(sample.Metric)
   484  	s.SampleValue = float64(sample.Value)
   485  	s.Timestamp = int64(sample.Timestamp)
   486  
   487  	if sample.Histogram != nil {
   488  		sh := fromModelSampleHistogram(sample.Histogram)
   489  		s.Histogram = &sh
   490  	} else {
   491  		s.Histogram = nil
   492  	}
   493  
   494  	return nil
   495  }
   496  
   497  // MarshalJSON implements json.Marshaler.
   498  func (s *Sample) MarshalJSON() ([]byte, error) {
   499  	var sample model.Sample
   500  	sample.Metric = cortexpb.FromLabelAdaptersToMetric(s.Labels)
   501  	sample.Value = model.SampleValue(s.SampleValue)
   502  	sample.Timestamp = model.Time(s.Timestamp)
   503  	if s.Histogram != nil {
   504  		sample.Histogram = toModelSampleHistogram(*s.Histogram)
   505  	}
   506  	return json.Marshal(sample)
   507  }
   508  
   509  // MarshalJSON implements json.Marshaler.
   510  func (s StringSample) MarshalJSON() ([]byte, error) {
   511  	v, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(model.String{
   512  		Value:     s.Value,
   513  		Timestamp: model.Time(s.TimestampMs),
   514  	})
   515  	if err != nil {
   516  		return nil, err
   517  	}
   518  	return v, nil
   519  }
   520  
// UnmarshalJSON implements json.Unmarshaler.
// Decoding is delegated to model.String via a fixed-size array of
// json.Unmarshaler elements. NOTE(review): this assumes the payload's outer
// JSON array shape lines up with model.String's own UnmarshalJSON — confirm
// against the encoder before changing.
func (s *StringSample) UnmarshalJSON(b []byte) error {
	var v model.String
	vs := [...]stdjson.Unmarshaler{&v}
	if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(b, &vs); err != nil {
		return err
	}
	s.TimestampMs = int64(v.Timestamp)
	s.Value = v.Value
	return nil
}
   532  
   533  // UnmarshalJSON implements json.Unmarshaler.
   534  func (s *PrometheusInstantQueryData) UnmarshalJSON(data []byte) error {
   535  	var queryData struct {
   536  		ResultType  string                   `json:"resultType"`
   537  		Result      jsoniter.RawMessage      `json:"result"`
   538  		Stats       *PrometheusResponseStats `json:"stats,omitempty"`
   539  		Explanation *Explanation             `json:"explanation,omitempty"`
   540  	}
   541  
   542  	if err := json.Unmarshal(data, &queryData); err != nil {
   543  		return err
   544  	}
   545  
   546  	s.ResultType = queryData.ResultType
   547  	s.Stats = queryData.Stats
   548  	s.Explanation = queryData.Explanation
   549  	switch s.ResultType {
   550  	case model.ValVector.String():
   551  		var result struct {
   552  			Samples []*Sample `json:"result"`
   553  		}
   554  		if err := json.Unmarshal(data, &result); err != nil {
   555  			return err
   556  		}
   557  		s.Result = PrometheusInstantQueryResult{
   558  			Result: &PrometheusInstantQueryResult_Vector{Vector: &Vector{
   559  				Samples: result.Samples,
   560  			}},
   561  		}
   562  	case model.ValMatrix.String():
   563  		var result struct {
   564  			SampleStreams []*SampleStream `json:"result"`
   565  		}
   566  		if err := json.Unmarshal(data, &result); err != nil {
   567  			return err
   568  		}
   569  		s.Result = PrometheusInstantQueryResult{
   570  			Result: &PrometheusInstantQueryResult_Matrix{Matrix: &Matrix{
   571  				SampleStreams: result.SampleStreams,
   572  			}},
   573  		}
   574  	case model.ValScalar.String():
   575  		var result struct {
   576  			Scalar cortexpb.Sample `json:"result"`
   577  		}
   578  		if err := json.Unmarshal(data, &result); err != nil {
   579  			return err
   580  		}
   581  		s.Result = PrometheusInstantQueryResult{
   582  			Result: &PrometheusInstantQueryResult_Scalar{Scalar: &result.Scalar},
   583  		}
   584  	case model.ValString.String():
   585  		var result struct {
   586  			Sample model.String `json:"result"`
   587  		}
   588  		if err := json.Unmarshal(data, &result); err != nil {
   589  			return err
   590  		}
   591  		s.Result = PrometheusInstantQueryResult{
   592  			Result: &PrometheusInstantQueryResult_StringSample{StringSample: &StringSample{
   593  				TimestampMs: int64(result.Sample.Timestamp),
   594  				Value:       result.Sample.Value,
   595  			}},
   596  		}
   597  	default:
   598  		return errors.New(fmt.Sprintf("%s result type not supported for PrometheusInstantQueryData", s.ResultType))
   599  	}
   600  	return nil
   601  }
   602  
   603  // MarshalJSON implements json.Marshaler.
   604  func (s *PrometheusInstantQueryData) MarshalJSON() ([]byte, error) {
   605  	switch s.ResultType {
   606  	case model.ValVector.String():
   607  		res := struct {
   608  			ResultType  string                   `json:"resultType"`
   609  			Data        []*Sample                `json:"result"`
   610  			Stats       *PrometheusResponseStats `json:"stats,omitempty"`
   611  			Explanation *Explanation             `json:"explanation,omitempty"`
   612  		}{
   613  			ResultType:  s.ResultType,
   614  			Data:        s.Result.GetVector().Samples,
   615  			Stats:       s.Stats,
   616  			Explanation: s.Explanation,
   617  		}
   618  		return json.Marshal(res)
   619  	case model.ValMatrix.String():
   620  		res := struct {
   621  			ResultType  string                   `json:"resultType"`
   622  			Data        []*SampleStream          `json:"result"`
   623  			Stats       *PrometheusResponseStats `json:"stats,omitempty"`
   624  			Explanation *Explanation             `json:"explanation,omitempty"`
   625  		}{
   626  			ResultType:  s.ResultType,
   627  			Data:        s.Result.GetMatrix().SampleStreams,
   628  			Stats:       s.Stats,
   629  			Explanation: s.Explanation,
   630  		}
   631  		return json.Marshal(res)
   632  	case model.ValScalar.String():
   633  		res := struct {
   634  			ResultType  string                   `json:"resultType"`
   635  			Data        *cortexpb.Sample         `json:"result"`
   636  			Stats       *PrometheusResponseStats `json:"stats,omitempty"`
   637  			Explanation *Explanation             `json:"explanation,omitempty"`
   638  		}{
   639  			ResultType:  s.ResultType,
   640  			Data:        s.Result.GetScalar(),
   641  			Stats:       s.Stats,
   642  			Explanation: s.Explanation,
   643  		}
   644  		return json.Marshal(res)
   645  	case model.ValString.String():
   646  		res := struct {
   647  			ResultType  string                   `json:"resultType"`
   648  			Data        *StringSample            `json:"result"`
   649  			Stats       *PrometheusResponseStats `json:"stats,omitempty"`
   650  			Explanation *Explanation             `json:"explanation,omitempty"`
   651  		}{
   652  			ResultType:  s.ResultType,
   653  			Data:        s.Result.GetStringSample(),
   654  			Stats:       s.Stats,
   655  			Explanation: s.Explanation,
   656  		}
   657  		return json.Marshal(res)
   658  	default:
   659  		return nil, errors.New(fmt.Sprintf("%s result type not supported for PrometheusInstantQueryData", s.ResultType))
   660  	}
   661  }
   662  
   663  // StatsMerge merge the stats from 2 responses
   664  // this function is similar to matrixMerge
   665  func StatsMerge(resps []Response) *PrometheusResponseStats {
   666  	output := map[int64]*PrometheusResponseQueryableSamplesStatsPerStep{}
   667  	hasStats := false
   668  	for _, resp := range resps {
   669  		stats := resp.GetStats()
   670  		if stats == nil {
   671  			continue
   672  		}
   673  
   674  		hasStats = true
   675  		if stats.Samples == nil {
   676  			continue
   677  		}
   678  
   679  		for _, s := range stats.Samples.TotalQueryableSamplesPerStep {
   680  			output[s.GetTimestampMs()] = s
   681  		}
   682  	}
   683  
   684  	if !hasStats {
   685  		return nil
   686  	}
   687  
   688  	keys := make([]int64, 0, len(output))
   689  	for key := range output {
   690  		keys = append(keys, key)
   691  	}
   692  
   693  	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
   694  
   695  	result := &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}}
   696  	for _, key := range keys {
   697  		result.Samples.TotalQueryableSamplesPerStep = append(result.Samples.TotalQueryableSamplesPerStep, output[key])
   698  		result.Samples.TotalQueryableSamples += output[key].Value
   699  	}
   700  
   701  	return result
   702  }
   703  
// matrixMerge concatenates the matrix results of all responses into one set
// of SampleStreams keyed by label set, trimming samples/histograms that
// overlap data already merged, and returns the series sorted by their
// label-set string. NOTE(review): the trimming assumes each stream's points
// are sorted by timestamp and that resps are ordered by start time (see
// MergeResponse) — confirm before reusing elsewhere.
func matrixMerge(resps []*PrometheusResponse) []SampleStream {
	output := map[string]*SampleStream{}
	for _, resp := range resps {
		for _, stream := range resp.Data.Result {
			metric := cortexpb.FromLabelAdaptersToLabels(stream.Labels).String()
			existing, ok := output[metric]
			if !ok {
				existing = &SampleStream{
					Labels: stream.Labels,
				}
			}
			// We need to make sure we don't repeat samples. This causes some visualisations to be broken in Grafana.
			// The prometheus API is inclusive of start and end timestamps.
			if len(existing.Samples) > 0 && len(stream.Samples) > 0 {
				existingEndTs := existing.Samples[len(existing.Samples)-1].TimestampMs
				if existingEndTs == stream.Samples[0].TimestampMs {
					// Typically this the cases where only 1 sample point overlap,
					// so optimize with simple code.
					stream.Samples = stream.Samples[1:]
				} else if existingEndTs > stream.Samples[0].TimestampMs {
					// Overlap might be big, use heavier algorithm to remove overlap.
					stream.Samples = SliceSamples(stream.Samples, existingEndTs)
				} // else there is no overlap, yay!
			}
			// Same for histograms as for samples above.
			if len(existing.Histograms) > 0 && len(stream.Histograms) > 0 {
				existingEndTs := existing.Histograms[len(existing.Histograms)-1].GetTimestamp()
				if existingEndTs == stream.Histograms[0].GetTimestamp() {
					stream.Histograms = stream.Histograms[1:]
				} else if existingEndTs > stream.Histograms[0].GetTimestamp() {
					stream.Histograms = SliceHistogram(stream.Histograms, existingEndTs)
				}
			}

			existing.Samples = append(existing.Samples, stream.Samples...)

			existing.Histograms = append(existing.Histograms, stream.Histograms...)

			output[metric] = existing
		}
	}

	// Emit the merged series in deterministic, sorted-by-labels order.
	keys := make([]string, 0, len(output))
	for key := range output {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	result := make([]SampleStream, 0, len(output))
	for _, key := range keys {
		result = append(result, *output[key])
	}

	return result
}
   759  
   760  // SliceSamples assumes given samples are sorted by timestamp in ascending order and
   761  // return a sub slice whose first element's is the smallest timestamp that is strictly
   762  // bigger than the given minTs. Empty slice is returned if minTs is bigger than all the
   763  // timestamps in samples.
   764  func SliceSamples(samples []cortexpb.Sample, minTs int64) []cortexpb.Sample {
   765  	if len(samples) <= 0 || minTs < samples[0].TimestampMs {
   766  		return samples
   767  	}
   768  
   769  	if len(samples) > 0 && minTs > samples[len(samples)-1].TimestampMs {
   770  		return samples[len(samples):]
   771  	}
   772  
   773  	searchResult := sort.Search(len(samples), func(i int) bool {
   774  		return samples[i].TimestampMs > minTs
   775  	})
   776  
   777  	return samples[searchResult:]
   778  }
   779  
   780  // SliceHistogram assumes given histogram are sorted by timestamp in ascending order and
   781  // return a sub slice whose first element's is the smallest timestamp that is strictly
   782  // bigger than the given minTs. Empty slice is returned if minTs is bigger than all the
   783  // timestamps in histogram.
   784  func SliceHistogram(histograms []SampleHistogramPair, minTs int64) []SampleHistogramPair {
   785  	if len(histograms) <= 0 || minTs < histograms[0].GetTimestamp() {
   786  		return histograms
   787  	}
   788  
   789  	if len(histograms) > 0 && minTs > histograms[len(histograms)-1].GetTimestamp() {
   790  		return histograms[len(histograms):]
   791  	}
   792  
   793  	searchResult := sort.Search(len(histograms), func(i int) bool {
   794  		return histograms[i].GetTimestamp() > minTs
   795  	})
   796  
   797  	return histograms[searchResult:]
   798  }
   799  
// parseDurationMs parses a step/duration query parameter into milliseconds.
// It accepts either a floating point number of seconds (Prometheus style,
// e.g. "30" or "0.5") or a Prometheus duration string (e.g. "1m", "2h30m").
func parseDurationMs(s string) (int64, error) {
	if d, err := strconv.ParseFloat(s, 64); err == nil {
		// Seconds -> milliseconds, guarding against int64 overflow before the cast.
		ts := d * float64(time.Second/time.Millisecond)
		if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) {
			return 0, httpgrpc.Errorf(http.StatusBadRequest, "cannot parse %q to a valid duration. It overflows int64", s)
		}
		return int64(ts), nil
	}
	if d, err := model.ParseDuration(s); err == nil {
		// model.Duration is nanoseconds; convert to milliseconds.
		return int64(d) / int64(time.Millisecond/time.Nanosecond), nil
	}
	return 0, httpgrpc.Errorf(http.StatusBadRequest, "cannot parse %q to a valid duration", s)
}
   813  
   814  func encodeTime(t int64) string {
   815  	f := float64(t) / 1.0e3
   816  	return strconv.FormatFloat(f, 'f', -1, 64)
   817  }
   818  
   819  func encodeDurationMs(d int64) string {
   820  	return strconv.FormatFloat(float64(d)/float64(time.Second/time.Millisecond), 'f', -1, 64)
   821  }
   822  
   823  func decorateWithParamName(err error, field string) error {
   824  	errTmpl := "invalid parameter %q; %v"
   825  	if status, ok := status.FromError(err); ok {
   826  		return httpgrpc.Errorf(int(status.Code()), errTmpl, field, status.Message())
   827  	}
   828  	return fmt.Errorf(errTmpl, field, err)
   829  }
   830  
// PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode decodes a
// per-step stats entry from its compact JSON form, a two-element array of
// [<seconds as float>, <value>]. The iterator calls below must stay in this
// exact order: open array, read timestamp, separator, read value, close array.
func PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	if !iter.ReadArray() {
		iter.ReportError("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", "expected [")
		return
	}

	// The timestamp arrives as fractional seconds; convert to milliseconds.
	t := model.Time(iter.ReadFloat64() * float64(time.Second/time.Millisecond))

	if !iter.ReadArray() {
		iter.ReportError("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", "expected ,")
		return
	}
	v := iter.ReadInt64()

	// ReadArray returning true here means the array has a third element,
	// i.e. it did not end where expected.
	if iter.ReadArray() {
		iter.ReportError("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", "expected ]")
	}

	*(*PrometheusResponseQueryableSamplesStatsPerStep)(ptr) = PrometheusResponseQueryableSamplesStatsPerStep{
		TimestampMs: int64(t),
		Value:       v,
	}
}
   854  
// PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode encodes a
// per-step stats entry into its compact JSON form [<seconds as float>, <value>],
// the inverse of the decoder above.
func PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	stats := (*PrometheusResponseQueryableSamplesStatsPerStep)(ptr)
	stream.WriteArrayStart()
	// Milliseconds -> fractional seconds, matching the Prometheus wire format.
	stream.WriteFloat64(float64(stats.TimestampMs) / float64(time.Second/time.Millisecond))
	stream.WriteMore()
	stream.WriteInt64(stats.Value)
	stream.WriteArrayEnd()
}
   863  
// init registers the custom jsoniter codec so per-step stats are
// (de)serialized in the compact [timestamp, value] array form used by the
// Prometheus API instead of the default struct encoding. The IsEmpty
// callback always returns false so entries are never omitted.
func init() {
	jsoniter.RegisterTypeEncoderFunc("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode, func(unsafe.Pointer) bool { return false })
	jsoniter.RegisterTypeDecoderFunc("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode)
}