github.com/thanos-io/thanos@v0.32.5/internal/cortex/querier/queryrange/results_cache.go

// Copyright (c) The Cortex Authors.
// Licensed under the Apache License 2.0.

package queryrange

import (
	"context"
	"flag"
	"fmt"
	"net/http"
	"sort"
	"strings"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
	"github.com/opentracing/opentracing-go"
	otlog "github.com/opentracing/opentracing-go/log"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/uber/jaeger-client-go"
	"github.com/weaveworks/common/httpgrpc"

	"github.com/thanos-io/thanos/internal/cortex/chunk/cache"
	"github.com/thanos-io/thanos/internal/cortex/cortexpb"
	"github.com/thanos-io/thanos/internal/cortex/querier"
	"github.com/thanos-io/thanos/internal/cortex/tenant"
	"github.com/thanos-io/thanos/internal/cortex/util/spanlogger"
	"github.com/thanos-io/thanos/internal/cortex/util/validation"
)

var (
	// noStoreValue is the value of cacheControlHeader when the response indicates that the results should not be cached.
	noStoreValue = "no-store"

	// ResultsCacheGenNumberHeaderName holds the name of the header we want to set in the HTTP response.
	ResultsCacheGenNumberHeaderName = "Results-Cache-Gen-Number"
)

type CacheGenNumberLoader interface {
	GetResultsCacheGenNumber(tenantIDs []string) string
}

// ResultsCacheConfig is the config for the results cache.
type ResultsCacheConfig struct {
	CacheConfig                cache.Config `yaml:"cache"`
	Compression                string       `yaml:"compression"`
	CacheQueryableSamplesStats bool         `yaml:"cache_queryable_samples_stats"`
}
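
// Illustrative sketch (an assumption, not part of the original file): given
// the yaml tags above, a results-cache block in an embedding configuration
// might look roughly like this. The enclosing key and the backend settings
// under "cache" depend on cache.Config and on the host application.
//
//	compression: snappy                  # "" disables compression
//	cache_queryable_samples_stats: false # requires querier per-step stats when true
//	cache:
//	  ... # backend-specific settings defined by cache.Config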

// RegisterFlags registers flags.
func (cfg *ResultsCacheConfig) RegisterFlags(f *flag.FlagSet) {
	cfg.CacheConfig.RegisterFlagsWithPrefix("frontend.", "", f)

	f.StringVar(&cfg.Compression, "frontend.compression", "", "Use compression in results cache. Supported values are: 'snappy' and '' (disable compression).")
	f.BoolVar(&cfg.CacheQueryableSamplesStats, "frontend.cache-queryable-samples-stats", false, "Cache Statistics queryable samples on results cache.")
}

func (cfg *ResultsCacheConfig) Validate(qCfg querier.Config) error {
	switch cfg.Compression {
	case "snappy", "":
		// valid
	default:
		return errors.Errorf("unsupported compression type: %s", cfg.Compression)
	}

	if cfg.CacheQueryableSamplesStats && !qCfg.EnablePerStepStats {
		return errors.New("frontend.cache-queryable-samples-stats may only be enabled in conjunction with querier.per-step-stats-enabled. Please set the latter")
	}

	return cfg.CacheConfig.Validate()
}

// Extractor is used by the cache to extract a subset of a response from a cache entry.
type Extractor interface {
	// Extract extracts the subset of the `from` response that lies between the `start` and `end` timestamps (in milliseconds).
	Extract(start, end int64, from Response) Response
	ResponseWithoutHeaders(resp Response) Response
	ResponseWithoutStats(resp Response) Response
}

// PrometheusResponseExtractor helps extract specific info from a query response.
type PrometheusResponseExtractor struct{}

// Extract extracts the response for a specific range from a response.
func (PrometheusResponseExtractor) Extract(start, end int64, from Response) Response {
	promRes := from.(*PrometheusResponse)
	return &PrometheusResponse{
		Status: StatusSuccess,
		Data: PrometheusData{
			ResultType:  promRes.Data.ResultType,
			Result:      extractMatrix(start, end, promRes.Data.Result),
			Stats:       extractStats(start, end, promRes.Data.Stats),
			Explanation: promRes.Data.Explanation,
		},
		Headers: promRes.Headers,
	}
}

// ResponseWithoutHeaders is useful for caching data without headers: since we
// do not need the headers when sending back the response, dropping them saves
// some space by reducing the size of the cached objects.
func (PrometheusResponseExtractor) ResponseWithoutHeaders(resp Response) Response {
	promRes := resp.(*PrometheusResponse)
	return &PrometheusResponse{
		Status: StatusSuccess,
		Data: PrometheusData{
			ResultType:  promRes.Data.ResultType,
			Result:      promRes.Data.Result,
			Stats:       promRes.Data.Stats,
			Explanation: promRes.Data.Explanation,
		},
	}
}

// ResponseWithoutStats returns the response without the stats information.
func (PrometheusResponseExtractor) ResponseWithoutStats(resp Response) Response {
	promRes := resp.(*PrometheusResponse)
	return &PrometheusResponse{
		Status: StatusSuccess,
		Data: PrometheusData{
			ResultType:  promRes.Data.ResultType,
			Result:      promRes.Data.Result,
			Explanation: promRes.Data.Explanation,
		},
		Headers: promRes.Headers,
	}
}

// CacheSplitter generates cache keys. This is a useful interface for downstream
// consumers who wish to implement their own strategies.
type CacheSplitter interface {
	GenerateCacheKey(userID string, r Request) string
}

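// Illustrative sketch (an assumption, not part of the original file): a
// downstream consumer could implement CacheSplitter with a per-tenant split
// interval. tenantSplitter and its intervals field are hypothetical names;
// the key layout is delegated to constSplitter below.
//
//	type tenantSplitter struct {
//		intervals map[string]time.Duration // per-tenant split interval
//	}
//
//	func (t tenantSplitter) GenerateCacheKey(userID string, r Request) string {
//		interval, ok := t.intervals[userID]
//		if !ok {
//			interval = 24 * time.Hour // default to a daily split
//		}
//		return constSplitter(interval).GenerateCacheKey(userID, r)
//	}
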
// constSplitter is a utility for using a constant split interval when determining cache keys
type constSplitter time.Duration

// GenerateCacheKey generates a cache key based on the userID, Request and interval.
func (t constSplitter) GenerateCacheKey(userID string, r Request) string {
	currentInterval := r.GetStart() / int64(time.Duration(t)/time.Millisecond)
	return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval)
}
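
// Worked example (illustrative, not part of the original file): with a 24h
// split interval, int64(time.Duration(t)/time.Millisecond) = 86400000, so a
// request by tenant "user-1" for query "up" with step 30000ms starting at
// 1672653600000ms (2023-01-02T10:00:00Z) falls into interval
// 1672653600000/86400000 = 19359 and produces the key "user-1:up:30000:19359".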

// ShouldCacheFn checks whether the current request should go to cache
// or not. If not, just send the request to the next handler.
type ShouldCacheFn func(r Request) bool

type resultsCache struct {
	logger   log.Logger
	cfg      ResultsCacheConfig
	next     Handler
	cache    cache.Cache
	limits   Limits
	splitter CacheSplitter

	extractor                  Extractor
	minCacheExtent             int64 // discard any cache extent smaller than this
	merger                     Merger
	cacheGenNumberLoader       CacheGenNumberLoader
	shouldCache                ShouldCacheFn
	cacheQueryableSamplesStats bool
}

// NewResultsCacheMiddleware creates results cache middleware from config.
// The middleware caches results using a unique cache key for a given request (step, query, user) and interval.
// The cache assumes that each request length (end-start) is less than or equal to the interval.
// Each request starting within the same interval will hit the same cache entry.
// If the cache doesn't have the entire duration of the request cached, it will query the uncached parts and append them to the cache entries.
// See `GenerateCacheKey`.
func NewResultsCacheMiddleware(
	logger log.Logger,
	cfg ResultsCacheConfig,
	splitter CacheSplitter,
	limits Limits,
	merger Merger,
	extractor Extractor,
	cacheGenNumberLoader CacheGenNumberLoader,
	shouldCache ShouldCacheFn,
	reg prometheus.Registerer,
) (Middleware, cache.Cache, error) {
	c, err := cache.New(cfg.CacheConfig, reg, logger)
	if err != nil {
		return nil, nil, err
	}
	if cfg.Compression == "snappy" {
		c = cache.NewSnappy(c, logger)
	}

	if cacheGenNumberLoader != nil {
		c = cache.NewCacheGenNumMiddleware(c)
	}

	return MiddlewareFunc(func(next Handler) Handler {
		return &resultsCache{
			logger:                     logger,
			cfg:                        cfg,
			next:                       next,
			cache:                      c,
			limits:                     limits,
			merger:                     merger,
			extractor:                  extractor,
			minCacheExtent:             (5 * time.Minute).Milliseconds(),
			splitter:                   splitter,
			cacheGenNumberLoader:       cacheGenNumberLoader,
			shouldCache:                shouldCache,
			cacheQueryableSamplesStats: cfg.CacheQueryableSamplesStats,
		}
	}), c, nil
}
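
// Illustrative wiring sketch (an assumption, not part of the original file):
// how an embedding query frontend might construct and apply this middleware.
// The cfg, limits and downstream values, and the use of PrometheusCodec as
// the Merger, are stand-ins for whatever the caller actually provides.
//
//	mw, _, err := NewResultsCacheMiddleware(
//		log.NewNopLogger(),
//		cfg,                          // ResultsCacheConfig with a cache backend configured
//		constSplitter(24*time.Hour),  // one cache entry per day per (user, query, step)
//		limits,                       // a Limits implementation
//		PrometheusCodec,              // a Merger for range-query responses
//		PrometheusResponseExtractor{},
//		nil,                          // no CacheGenNumberLoader: skip gen-number invalidation
//		nil,                          // no ShouldCacheFn: consider caching every request
//		prometheus.DefaultRegisterer,
//	)
//	if err != nil {
//		// handle error
//	}
//	handler := mw.Wrap(downstream) // downstream is the next Handler in the chain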

func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) {
	tenantIDs, err := tenant.TenantIDs(ctx)
	respWithStats := r.GetStats() != "" && s.cacheQueryableSamplesStats
	if err != nil {
		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
	}

	// If cache_queryable_samples_stats is enabled we always need to request the stats upstream.
	if s.cacheQueryableSamplesStats {
		r = r.WithStats("all")
	} else {
		r = r.WithStats("")
	}

	if s.shouldCache != nil && !s.shouldCache(r) {
		return s.next.Do(ctx, r)
	}

	if s.cacheGenNumberLoader != nil {
		ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs))
	}

	var (
		key      = s.splitter.GenerateCacheKey(tenant.JoinTenantIDs(tenantIDs), r)
		extents  []Extent
		response Response
	)

	maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, s.limits.MaxCacheFreshness)
	maxCacheTime := int64(model.Now().Add(-maxCacheFreshness))
	if r.GetStart() > maxCacheTime {
		return s.next.Do(ctx, r)
	}

	cached, ok := s.get(ctx, key)
	if ok {
		response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime)
	} else {
		response, extents, err = s.handleMiss(ctx, r, maxCacheTime)
	}

	if err == nil && len(extents) > 0 {
		extents, err := s.filterRecentExtents(r, maxCacheFreshness, extents)
		if err != nil {
			return nil, err
		}
		s.put(ctx, key, extents)
	}

	if err == nil && !respWithStats {
		response = s.extractor.ResponseWithoutStats(response)
	}
	return response, err
}

// shouldCacheResponse says whether the response should be cached or not.
func (s resultsCache) shouldCacheResponse(ctx context.Context, req Request, r Response, maxCacheTime int64) bool {
	headerValues := getHeaderValuesWithName(r, cacheControlHeader)
	for _, v := range headerValues {
		if v == noStoreValue {
			level.Debug(s.logger).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cacheControlHeader, noStoreValue))
			return false
		}
	}

	if !s.isAtModifierCachable(req, maxCacheTime) {
		return false
	}

	if s.cacheGenNumberLoader == nil {
		return true
	}

	genNumbersFromResp := getHeaderValuesWithName(r, ResultsCacheGenNumberHeaderName)
	genNumberFromCtx := cache.ExtractCacheGenNumber(ctx)

	if len(genNumbersFromResp) == 0 && genNumberFromCtx != "" {
		level.Debug(s.logger).Log("msg", fmt.Sprintf("we found results cache gen number %s set in store but none in headers", genNumberFromCtx))
		return false
	}

	for _, gen := range genNumbersFromResp {
		if gen != genNumberFromCtx {
			level.Debug(s.logger).Log("msg", fmt.Sprintf("inconsistency in results cache gen numbers %s (GEN-FROM-RESPONSE) != %s (GEN-FROM-STORE), not caching the response", gen, genNumberFromCtx))
			return false
		}
	}

	return true
}

var errAtModifierAfterEnd = errors.New("at modifier after end")

// isAtModifierCachable returns true if the @ modifier result
// is safe to cache.
func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool {
	// There are 2 cases when the @ modifier is not safe to cache:
	//   1. When the @ modifier points to a time beyond maxCacheTime.
	//   2. If the @ modifier time is > the query range end while being
	//      below maxCacheTime. In such cases, if any tenant is intentionally
	//      querying old data, we could cache an empty result if we look
	//      beyond the query end.
	query := r.GetQuery()
	if !strings.Contains(query, "@") {
		return true
	}
	expr, err := parser.ParseExpr(query)
	if err != nil {
		// We are being pessimistic in such cases.
		level.Warn(s.logger).Log("msg", "failed to parse query, considering @ modifier as not cachable", "query", query, "err", err)
		return false
	}

	// This resolves the start() and end() used with the @ modifier.
	expr = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd()))

	end := r.GetEnd()
	atModCachable := true
	parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error {
		switch e := n.(type) {
		case *parser.VectorSelector:
			if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) {
				atModCachable = false
				return errAtModifierAfterEnd
			}
		case *parser.MatrixSelector:
			ts := e.VectorSelector.(*parser.VectorSelector).Timestamp
			if ts != nil && (*ts > end || *ts > maxCacheTime) {
				atModCachable = false
				return errAtModifierAfterEnd
			}
		case *parser.SubqueryExpr:
			if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) {
				atModCachable = false
				return errAtModifierAfterEnd
			}
		}
		return nil
	})

	return atModCachable
}
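
// Illustrative examples (an assumption, not part of the original file) of how
// the checks above play out. Suppose the query range ends at t=800s and
// maxCacheTime is 900s (@ timestamps below are in seconds):
//
//	up @ 700    // cachable: 700 <= end and 700 <= maxCacheTime
//	up @ 850    // not cachable: 850 > end (case 2), even though 850 <= maxCacheTime
//	up @ 950    // not cachable: 950 > maxCacheTime (case 1) and > end
//	up @ end()  // cachable: PreprocessExpr resolves end() to 800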

func getHeaderValuesWithName(r Response, headerName string) (headerValues []string) {
	for _, hv := range r.GetHeaders() {
		if hv.GetName() != headerName {
			continue
		}

		headerValues = append(headerValues, hv.GetValues()...)
	}

	return
}

func (s resultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) {
	response, err := s.next.Do(ctx, r)
	if err != nil {
		return nil, nil, err
	}

	if !s.shouldCacheResponse(ctx, r, response, maxCacheTime) {
		return response, []Extent{}, nil
	}

	extent, err := toExtent(ctx, r, s.extractor.ResponseWithoutHeaders(response))
	if err != nil {
		return nil, nil, err
	}

	extents := []Extent{
		extent,
	}
	return response, extents, nil
}

func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) {
	var (
		reqResps []RequestResponse
		err      error
	)
	log, ctx := spanlogger.New(ctx, "handleHit")
	defer log.Finish()

	requests, responses, err := s.partition(r, extents)
	if err != nil {
		return nil, nil, err
	}
	if len(requests) == 0 {
		response, err := s.merger.MergeResponse(r, responses...)
		// No downstream requests so no need to write back to the cache.
		return response, nil, err
	}

	reqResps, err = DoRequests(ctx, s.next, requests, s.limits)
	if err != nil {
		return nil, nil, err
	}

	for _, reqResp := range reqResps {
		responses = append(responses, reqResp.Response)
		if !s.shouldCacheResponse(ctx, r, reqResp.Response, maxCacheTime) {
			continue
		}
		extent, err := toExtent(ctx, reqResp.Request, s.extractor.ResponseWithoutHeaders(reqResp.Response))
		if err != nil {
			return nil, nil, err
		}
		extents = append(extents, extent)
	}
	sort.Slice(extents, func(i, j int) bool {
		if extents[i].Start == extents[j].Start {
			// As an optimization, for two extents that start at the same time,
			// we put the bigger extent at the front of the slice, which helps
			// reduce the amount of merging we have to do later.
			return extents[i].End > extents[j].End
		}

		return extents[i].Start < extents[j].Start
	})

	// Merge any extents - potentially overlapping.
	accumulator, err := newAccumulator(extents[0])
	if err != nil {
		return nil, nil, err
	}
	mergedExtents := make([]Extent, 0, len(extents))

	for i := 1; i < len(extents); i++ {
		if accumulator.End+r.GetStep() < extents[i].Start {
			mergedExtents, err = merge(mergedExtents, accumulator)
			if err != nil {
				return nil, nil, err
			}
			accumulator, err = newAccumulator(extents[i])
			if err != nil {
				return nil, nil, err
			}
			continue
		}

		if accumulator.End >= extents[i].End {
			continue
		}

		accumulator.TraceId = jaegerTraceID(ctx)
		accumulator.End = extents[i].End
		currentRes, err := extents[i].toResponse()
		if err != nil {
			return nil, nil, err
		}
		merged, err := s.merger.MergeResponse(r, accumulator.Response, currentRes)
		if err != nil {
			return nil, nil, err
		}
		accumulator.Response = merged
	}

	mergedExtents, err = merge(mergedExtents, accumulator)
	if err != nil {
		return nil, nil, err
	}

	response, err := s.merger.MergeResponse(r, responses...)
	return response, mergedExtents, err
}

type accumulator struct {
	Response
	Extent
}

func merge(extents []Extent, acc *accumulator) ([]Extent, error) {
	any, err := types.MarshalAny(acc.Response)
	if err != nil {
		return nil, err
	}
	return append(extents, Extent{
		Start:    acc.Extent.Start,
		End:      acc.Extent.End,
		Response: any,
		TraceId:  acc.Extent.TraceId,
	}), nil
}

func newAccumulator(base Extent) (*accumulator, error) {
	res, err := base.toResponse()
	if err != nil {
		return nil, err
	}
	return &accumulator{
		Response: res,
		Extent:   base,
	}, nil
}

func toExtent(ctx context.Context, req Request, res Response) (Extent, error) {
	any, err := types.MarshalAny(res)
	if err != nil {
		return Extent{}, err
	}
	return Extent{
		Start:    req.GetStart(),
		End:      req.GetEnd(),
		Response: any,
		TraceId:  jaegerTraceID(ctx),
	}, nil
}

// partition calculates the required requests to satisfy req given the cached data.
// extents must be in order by start time.
func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) {
	var requests []Request
	var cachedResponses []Response
	start := req.GetStart()

	for _, extent := range extents {
		// If there is no overlap, ignore this extent.
		if extent.GetEnd() < start || extent.Start > req.GetEnd() {
			continue
		}

		// If this extent is tiny and the request is not tiny, discard it: it is more efficient to do a few larger queries.
		// Hopefully a tiny request can turn a tiny extent into a not-so-tiny extent.

		// However, if the step is large enough, the split_query_by_interval middleware would generate a query with the same start and end.
		// For example, if the step size is more than 12h and the interval is 24h.
		// This means the extent's start and end time would be the same, even if the time range covers several hours.
		if (req.GetStart() != req.GetEnd()) && (req.GetEnd()-req.GetStart() > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) {
			continue
		}

		// If there is a bit missing at the front, make a request for that.
		if start < extent.Start {
			r := req.WithStartEnd(start, extent.Start)
			requests = append(requests, r)
		}
		res, err := extent.toResponse()
		if err != nil {
			return nil, nil, err
		}
		// Extract the overlap from the cached extent.
		cachedResponses = append(cachedResponses, s.extractor.Extract(start, req.GetEnd(), res))
		start = extent.End
	}

	// Lastly, make a request for any data missing at the end.
	if start < req.GetEnd() {
		r := req.WithStartEnd(start, req.GetEnd())
		requests = append(requests, r)
	}

	// If start and end are the same (valid in PromQL), start == req.GetEnd() and we won't do the query.
	// But we should only do the request if we don't have a valid cached response for it.
	if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 {
		requests = append(requests, req)
	}

	return requests, cachedResponses, nil
}
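
// Illustrative walk-through (an assumption, not part of the original file):
// for a request over [100, 400] with cached extents [0, 200] and [250, 300],
// and ignoring the tiny-extent check for clarity, partition yields downstream
// requests [200, 250] and [300, 400], plus the cached overlaps
// Extract(100, 400, ext1) and Extract(200, 400, ext2). Note that the running
// start, not the request start, is the lower bound passed to Extract for each
// successive extent.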

func (s resultsCache) filterRecentExtents(req Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) {
	maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / req.GetStep()) * req.GetStep()
	for i := range extents {
		// Never cache data for the latest freshness period.
		if extents[i].End > maxCacheTime {
			extents[i].End = maxCacheTime
			res, err := extents[i].toResponse()
			if err != nil {
				return nil, err
			}
			extracted := s.extractor.Extract(extents[i].Start, maxCacheTime, res)
			any, err := types.MarshalAny(extracted)
			if err != nil {
				return nil, err
			}
			extents[i].Response = any
		}
	}
	return extents, nil
}
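
// Worked example (illustrative, not part of the original file): with
// model.Now() = 1,000,000ms, maxCacheFreshness = 10m (600,000ms) and
// step = 30,000ms, maxCacheTime = ((1,000,000-600,000)/30,000)*30,000 =
// 390,000ms. The cut-off is aligned down to a multiple of the step, and any
// extent ending after it is truncated and re-extracted before being stored.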

func (s resultsCache) get(ctx context.Context, key string) ([]Extent, bool) {
	found, bufs, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)})
	if len(found) != 1 {
		return nil, false
	}

	var resp CachedResponse
	log, ctx := spanlogger.New(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck
	defer log.Finish()

	log.LogFields(otlog.Int("bytes", len(bufs[0])))

	if err := proto.Unmarshal(bufs[0], &resp); err != nil {
		level.Error(log).Log("msg", "error unmarshalling cached value", "err", err)
		log.Error(err)
		return nil, false
	}

	if resp.Key != key {
		return nil, false
	}

	// Refreshes the cache if it contains an old proto schema.
	for _, e := range resp.Extents {
		if e.Response == nil {
			return nil, false
		}
	}

	return resp.Extents, true
}

func (s resultsCache) put(ctx context.Context, key string, extents []Extent) {
	buf, err := proto.Marshal(&CachedResponse{
		Key:     key,
		Extents: extents,
	})
	if err != nil {
		level.Error(s.logger).Log("msg", "error marshalling cached value", "err", err)
		return
	}

	s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf})
}

func jaegerTraceID(ctx context.Context) string {
	span := opentracing.SpanFromContext(ctx)
	if span == nil {
		return ""
	}

	spanContext, ok := span.Context().(jaeger.SpanContext)
	if !ok {
		return ""
	}

	return spanContext.TraceID().String()
}

// extractStats returns the stats for a given time range.
// This function is similar to extractSampleStream.
func extractStats(start, end int64, stats *PrometheusResponseStats) *PrometheusResponseStats {
	if stats == nil || stats.Samples == nil {
		return stats
	}

	result := &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}}
	for _, s := range stats.Samples.TotalQueryableSamplesPerStep {
		if start <= s.TimestampMs && s.TimestampMs <= end {
			result.Samples.TotalQueryableSamplesPerStep = append(result.Samples.TotalQueryableSamplesPerStep, s)
			result.Samples.TotalQueryableSamples += s.Value
		}
	}
	return result
}

func extractMatrix(start, end int64, matrix []SampleStream) []SampleStream {
	result := make([]SampleStream, 0, len(matrix))
	for _, stream := range matrix {
		extracted, ok := extractSampleStream(start, end, stream)
		if ok {
			result = append(result, extracted)
		}
	}
	return result
}

func extractSampleStream(start, end int64, stream SampleStream) (SampleStream, bool) {
	result := SampleStream{
		Labels: stream.Labels,
	}

	if len(stream.Samples) > 0 {
		result.Samples = make([]cortexpb.Sample, 0, len(stream.Samples))
	}

	if len(stream.Histograms) > 0 {
		result.Histograms = make([]SampleHistogramPair, 0, len(stream.Histograms))
	}

	for _, sample := range stream.Samples {
		if start <= sample.TimestampMs && sample.TimestampMs <= end {
			result.Samples = append(result.Samples, sample)
		}
	}
	for _, histogram := range stream.Histograms {
		if start <= int64(histogram.GetTimestamp()) && int64(histogram.GetTimestamp()) <= end {
			result.Histograms = append(result.Histograms, histogram)
		}
	}
	if len(result.Samples) == 0 && len(result.Histograms) == 0 {
		return SampleStream{}, false
	}
	return result, true
}

func (e *Extent) toResponse() (Response, error) {
	msg, err := types.EmptyAny(e.Response)
	if err != nil {
		return nil, err
	}

	if err := types.UnmarshalAny(e.Response, msg); err != nil {
		return nil, err
	}

	resp, ok := msg.(Response)
	if !ok {
		return nil, fmt.Errorf("bad cached type")
	}
	return resp, nil
}