github.com/thanos-io/thanos@v0.32.5/internal/cortex/querier/queryrange/results_cache_test.go

// Copyright (c) The Cortex Authors.
// Licensed under the Apache License 2.0.

package queryrange

import (
	"context"
	"fmt"
	"strconv"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/gogo/protobuf/types"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/user"

	"github.com/thanos-io/thanos/internal/cortex/chunk/cache"
	"github.com/thanos-io/thanos/internal/cortex/cortexpb"
	"github.com/thanos-io/thanos/internal/cortex/util/flagext"
)

const (
	query                 = "/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&stats=all&step=120"
	responseBody          = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}],"explanation":null}}`
	histogramResponseBody = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"fake":"histogram"},"histograms":[[1536673680,{"count":"5","sum":"18.4","buckets":[[3,"-0.001","0.001","2"],[0,"0.7071067811865475","1","1"],[0,"1","1.414213562373095","2"],[0,"2","2.82842712474619","1"],[0,"2.82842712474619","4","1"]]}]]}],"explanation":null}}`
)

var (
	parsedRequest = &PrometheusRequest{
		Path:  "/api/v1/query_range",
		Start: 1536673680 * 1e3,
		End:   1536716898 * 1e3,
		Step:  120 * 1e3,
		Query: "sum(container_memory_rss) by (namespace)",
		Stats: "all",
	}
	parsedHistogramRequest = &PrometheusRequest{
		Path:  "/api/v1/query_range",
		Start: 1536673680 * 1e3,
		End:   1536716898 * 1e3,
		Step:  120 * 1e3,
		Query: "{fake=\"histogram\"}",
		Stats: "all",
	}
	reqHeaders = []*PrometheusRequestHeader{
		{
			Name:   "Test-Header",
			Values: []string{"test"},
		},
	}
	noCacheRequest = &PrometheusRequest{
		Path:           "/api/v1/query_range",
		Start:          1536673680 * 1e3,
		End:            1536716898 * 1e3,
		Step:           120 * 1e3,
		Query:          "sum(container_memory_rss) by (namespace)",
		CachingOptions: CachingOptions{Disabled: true},
	}
	noCacheRequestWithStats = &PrometheusRequest{
		Path:           "/api/v1/query_range",
		Start:          1536673680 * 1e3,
		End:            1536716898 * 1e3,
		Step:           120 * 1e3,
		Stats:          "all",
		Query:          "sum(container_memory_rss) by (namespace)",
		CachingOptions: CachingOptions{Disabled: true},
	}
	respHeaders = []*PrometheusResponseHeader{
		{
			Name:   "Content-Type",
			Values: []string{"application/json"},
		},
	}
	parsedResponse = &PrometheusResponse{
		Status: "success",
		Data: PrometheusData{
			ResultType: model.ValMatrix.String(),
			Result: []SampleStream{
				{
					Labels: []cortexpb.LabelAdapter{
						{Name: "foo", Value: "bar"},
					},
					Samples: []cortexpb.Sample{
						{Value: 137, TimestampMs: 1536673680000},
						{Value: 137, TimestampMs: 1536673780000},
					},
				},
			},
		},
	}
	parsedHistogramResponse = &PrometheusResponse{
		Status: "success",
		Data: PrometheusData{
			ResultType: model.ValMatrix.String(),
			Result: []SampleStream{
				{
					Labels: []cortexpb.LabelAdapter{
						{Name: "fake", Value: "histogram"},
					},
					Histograms: []SampleHistogramPair{
						{
							Timestamp: 1536673680000,
							Histogram: genSampleHistogram(),
						},
					},
				},
			},
		},
	}
)

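// mkAPIResponse builds a matrix PrometheusResponse with one sample per step
// in [start, end], without per-step statistics.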
func mkAPIResponse(start, end, step int64) *PrometheusResponse {
	return mkAPIResponseWithStats(start, end, step, false)
}

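// mkAPIResponseWithStats builds a matrix PrometheusResponse with one sample
// per step in [start, end]; when withStats is true it also populates the
// per-step queryable-samples statistics.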
func mkAPIResponseWithStats(start, end, step int64, withStats bool) *PrometheusResponse {
	var samples []cortexpb.Sample
	var stats *PrometheusResponseStats
	if withStats {
		stats = &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}}
	}
	for i := start; i <= end; i += step {
		samples = append(samples, cortexpb.Sample{
			TimestampMs: i,
			Value:       float64(i),
		})

		if withStats {
			stats.Samples.TotalQueryableSamplesPerStep = append(stats.Samples.TotalQueryableSamplesPerStep, &PrometheusResponseQueryableSamplesStatsPerStep{
				TimestampMs: i,
				Value:       i,
			})

			stats.Samples.TotalQueryableSamples += i
		}
	}

	return &PrometheusResponse{
		Status: StatusSuccess,
		Data: PrometheusData{
			ResultType: matrix,
			Stats:      stats,
			Result: []SampleStream{
				{
					Labels: []cortexpb.LabelAdapter{
						{Name: "foo", Value: "bar"},
					},
					Samples: samples,
				},
			},
		},
	}
}

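// The mkExtent* helpers build cache Extents whose Response is the marshalled
// output of mkAPIResponseWithStats for the given range, step, and stats flag.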
func mkExtentWithStats(start, end int64) Extent {
	return mkExtentWithStepWithStats(start, end, 10, true)
}

func mkExtent(start, end int64) Extent {
	return mkExtentWithStepWithStats(start, end, 10, false)
}

func mkExtentWithStep(start, end, step int64) Extent {
	return mkExtentWithStepWithStats(start, end, step, false)
}

func mkExtentWithStepWithStats(start, end, step int64, withStats bool) Extent {
	res := mkAPIResponseWithStats(start, end, step, withStats)
	marshalled, err := types.MarshalAny(res)
	if err != nil {
		panic(err)
	}
	return Extent{
		Start:    start,
		End:      end,
		Response: marshalled,
	}
}

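// TestStatsCacheQuerySamples verifies that the results cache middleware only
// requests and forwards per-step statistics when CacheQueryableSamplesStats
// is enabled.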
func TestStatsCacheQuerySamples(t *testing.T) {
	for _, tc := range []struct {
		name                       string
		cacheQueryableSamplesStats bool
		err                        error
		req                        Request
		upstreamResponse           Response
		expectedResponse           Response
	}{
		{
			name:                       "should return error",
			cacheQueryableSamplesStats: true,
			req:                        noCacheRequest,
			err:                        fmt.Errorf("error"),
		},
		{
			name:                       "should return response with stats",
			cacheQueryableSamplesStats: true,
			req:                        noCacheRequestWithStats,
			upstreamResponse:           mkAPIResponseWithStats(0, 100, 10, true),
			expectedResponse:           mkAPIResponseWithStats(0, 100, 10, true),
		},
		{
			name:                       "should strip stats from the response if not requested",
			cacheQueryableSamplesStats: true,
			req:                        noCacheRequest,
			upstreamResponse:           mkAPIResponseWithStats(0, 100, 10, false),
			expectedResponse:           mkAPIResponseWithStats(0, 100, 10, false),
		},
		{
			name:                       "should not ask for stats if cacheQueryableSamplesStats is disabled",
			cacheQueryableSamplesStats: false,
			req:                        noCacheRequest,
			upstreamResponse:           mkAPIResponseWithStats(0, 100, 10, false),
			expectedResponse:           mkAPIResponseWithStats(0, 100, 10, false),
		},
		{
			name:                       "should not forward stats when cacheQueryableSamplesStats is disabled",
			cacheQueryableSamplesStats: false,
			req:                        noCacheRequestWithStats,
			upstreamResponse:           mkAPIResponseWithStats(0, 100, 10, true),
			expectedResponse:           mkAPIResponseWithStats(0, 100, 10, false),
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			cfg := ResultsCacheConfig{
				CacheConfig: cache.Config{
					Cache: cache.NewMockCache(),
				},
				CacheQueryableSamplesStats: tc.cacheQueryableSamplesStats,
			}
			rcm, _, err := NewResultsCacheMiddleware(
				log.NewNopLogger(),
				cfg,
				constSplitter(day),
				mockLimits{},
				PrometheusCodec,
				PrometheusResponseExtractor{},
				nil,
				nil,
				nil,
			)
			require.NoError(t, err)

			rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
				if tc.cacheQueryableSamplesStats {
					require.Equal(t, "all", req.GetStats())
				} else {
					require.Equal(t, "", req.GetStats())
				}
				return tc.upstreamResponse, tc.err
			}))
			ctx := user.InjectOrgID(context.Background(), "1")
			r, err := rc.Do(ctx, tc.req)
			require.Equal(t, tc.err, err)
			require.Equal(t, tc.expectedResponse, r)
		})
	}
}

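// TestShouldCache exercises shouldCacheResponse: the Cache-Control header,
// cache generation numbers, and @ modifiers that reference times after the
// request end or after maxCacheTime must all prevent caching.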
func TestShouldCache(t *testing.T) {
	maxCacheTime := int64(150 * 1000)
	c := &resultsCache{logger: log.NewNopLogger(), cacheGenNumberLoader: newMockCacheGenNumberLoader()}
	for _, tc := range []struct {
		name                   string
		request                Request
		input                  Response
		cacheGenNumberToInject string
		expected               bool
	}{
		// Tests only for cacheControlHeader
		{
			name:    "does not contain the cacheControl header",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			expected: true,
		},
		{
			name:    "contains the cacheControl header with the no-store value",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{noStoreValue},
					},
				},
			}),
			expected: false,
		},
		{
			name:    "cacheControl header contains extra values but still good",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{"foo", noStoreValue},
					},
				},
			}),
			expected: false,
		},
		{
			name:     "broken response",
			request:  &PrometheusRequest{Query: "metric"},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:    "nil headers",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{nil},
			}),
			expected: true,
		},
		{
			name:    "has the cacheControl header but no values",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{{Name: cacheControlHeader}},
			}),
			expected: true,
		},

		// Tests only for cacheGenNumber header
		{
			name:    "cacheGenNumber set neither in header nor in store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			expected: true,
		},
		{
			name:    "cacheGenNumber set in store but not in header",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		{
			name:    "cacheGenNumber set in header but not in store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1"},
					},
				},
			}),
			expected: false,
		},
		{
			name:    "cacheGenNumber in header and store are the same",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "1"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               true,
		},
		{
			name:    "inconsistency between cacheGenNumber in header and store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "2"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		{
			name:    "cacheControl header says not to cache and cacheGenNumbers in store and header are consistent",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{noStoreValue},
					},
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "1"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		// @ modifier on vector selectors.
		{
			name:     "@ modifier on vector selector, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 123", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on vector selector, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 127", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 151", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 151", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ start()", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on vector selector with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ end()", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		// @ modifier on matrix selectors.
		{
			name:     "@ modifier on matrix selector, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 123)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on matrix selector, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 127)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ start())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on matrix selector with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ end())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		// @ modifier on subqueries.
		{
			name:     "@ modifier on subqueries, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 123)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on subqueries, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 127)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ start())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on subqueries with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ end())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			ctx := cache.InjectCacheGenNumber(context.Background(), tc.cacheGenNumberToInject)
			ret := c.shouldCacheResponse(ctx, tc.request, tc.input, maxCacheTime)
			require.Equal(t, tc.expected, ret)
		})
	}
}

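// TestPartition verifies how a request is split into the sub-requests that
// must still be executed and the responses that can be served from cached
// extents, with and without stats.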
func TestPartition(t *testing.T) {
	for _, tc := range []struct {
		name                   string
		input                  Request
		prevCachedResponse     []Extent
		expectedRequests       []Request
		expectedCachedResponse []Response
	}{
		{
			name: "Test a complete hit.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(0, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(0, 100, 10),
			},
		},

		{
			name: "Test with a complete miss.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(110, 210),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 0,
					End:   100,
				}},
		},
		{
			name: "Test a partial hit.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 100),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 0,
					End:   50,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(50, 100, 10),
			},
		},
		{
			name: "Test multiple partial hits.",
			input: &PrometheusRequest{
				Start: 100,
				End:   200,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(160, 250),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 120,
					End:   160,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
				mkAPIResponse(160, 200, 10),
			},
		},
		{
			name: "Partial hits with tiny gap.",
			input: &PrometheusRequest{
				Start: 100,
				End:   160,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(122, 130),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 120,
					End:   160,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
			},
		},
		{
			name: "Extent is outside the range and the request has a single step (same start and end).",
			input: &PrometheusRequest{
				Start: 100,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 90),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 100,
					End:   100,
				},
			},
		},
		{
			name: "Test when hit has a large step and only a single sample extent.",
			// If there is only a single sample in the split interval, start and end will be the same.
			input: &PrometheusRequest{
				Start: 100,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(100, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 105, 10),
			},
		},
		{
			name: "[stats] Test a complete hit.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtentWithStats(0, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponseWithStats(0, 100, 10, true),
			},
		},

		{
			name: "[stats] Test with a complete miss.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtentWithStats(110, 210),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 0,
					End:   100,
				}},
		},
		{
			name: "[stats] Test a partial hit.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtentWithStats(50, 100),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 0,
					End:   50,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponseWithStats(50, 100, 10, true),
			},
		},
		{
			name: "[stats] Test multiple partial hits.",
			input: &PrometheusRequest{
				Start: 100,
				End:   200,
			},
			prevCachedResponse: []Extent{
				mkExtentWithStats(50, 120),
				mkExtentWithStats(160, 250),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 120,
					End:   160,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponseWithStats(100, 120, 10, true),
				mkAPIResponseWithStats(160, 200, 10, true),
			},
		},
		{
			name: "[stats] Partial hits with tiny gap.",
			input: &PrometheusRequest{
				Start: 100,
				End:   160,
			},
			prevCachedResponse: []Extent{
				mkExtentWithStats(50, 120),
				mkExtentWithStats(122, 130),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 120,
					End:   160,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponseWithStats(100, 120, 10, true),
			},
		},
		{
			name: "[stats] Extent is outside the range and the request has a single step (same start and end).",
			input: &PrometheusRequest{
				Start: 100,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtentWithStats(50, 90),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 100,
					End:   100,
				},
			},
		},
		{
			name: "[stats] Test when hit has a large step and only a single sample extent.",
			// If there is only a single sample in the split interval, start and end will be the same.
			input: &PrometheusRequest{
				Start: 100,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtentWithStats(100, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponseWithStats(100, 105, 10, true),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			s := resultsCache{
				extractor:      PrometheusResponseExtractor{},
				minCacheExtent: 10,
			}
			reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
			require.NoError(t, err)
			require.Equal(t, tc.expectedRequests, reqs)
			require.Equal(t, tc.expectedCachedResponse, resps)
		})
	}
}

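// TestHandleHit verifies that handleHit fills the gaps between cached extents
// via the downstream handler and that the updated extents are merged,
// extended, and purged of tiny fragments as expected.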
func TestHandleHit(t *testing.T) {
	for _, tc := range []struct {
		name                       string
		input                      Request
		cachedEntry                []Extent
		expectedUpdatedCachedEntry []Extent
	}{
		{
			name: "Should drop tiny extent that overlaps with non-tiny request only",
			input: &PrometheusRequest{
				Start: 100,
				End:   120,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
		},
		{
			name: "Should replace tiny extents that are covered by a bigger request",
			input: &PrometheusRequest{
				Start: 100,
				End:   200,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 115, 5),
				mkExtentWithStep(120, 125, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 200, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
		},
		{
			name: "Should not drop tiny extent that completely overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   105,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: nil, // no cache update needed; request fulfilled from cache
		},
		{
			name: "Should not drop tiny extent that partially center-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 106,
				End:   108,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: nil, // no cache update needed; request fulfilled from cache
		},
		{
			name: "Should not drop tiny extent that partially left-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   106,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(100, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should not drop tiny extent that partially right-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   106,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 102, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 106, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should merge fragmented extents if request fills the hole",
			input: &PrometheusRequest{
				Start: 40,
				End:   80,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 20, 20),
				mkExtentWithStep(80, 100, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 100, 20),
			},
		},
		{
			name: "Should left-extend extent if request starts earlier than extent in cache",
			input: &PrometheusRequest{
				Start: 40,
				End:   80,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(40, 160, 20),
			},
		},
		{
			name: "Should right-extend extent if request ends later than extent in cache",
			input: &PrometheusRequest{
				Start: 100,
				End:   180,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
		{
			name: "Should not error if a completely overlapped smaller Extent is erroneous",
			input: &PrometheusRequest{
				// This request is carefully crafted so that cachedEntry is not used to fulfill
				// the request.
				Start: 160,
				End:   180,
				Step:  20,
			},
			cachedEntry: []Extent{
				{
					Start: 60,
					End:   80,

					// Without the optimization of sorting by End when the Start of two Extents
					// is equal, this nil response would cause an error during the Extent merge
					// phase. With the optimization, this bad Extent is dropped and the good
					// Extent below is used instead.
					Response: nil,
				},
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			sut := resultsCache{
				extractor:      PrometheusResponseExtractor{},
				minCacheExtent: 10,
				limits:         mockLimits{},
				merger:         PrometheusCodec,
				next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
					return mkAPIResponse(req.GetStart(), req.GetEnd(), req.GetStep()), nil
				}),
			}

			ctx := user.InjectOrgID(context.Background(), "1")
			response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0)
			require.NoError(t, err)

			expectedResponse := mkAPIResponse(tc.input.GetStart(), tc.input.GetEnd(), tc.input.GetStep())
			require.Equal(t, expectedResponse, response, "response does not match the expectation")
			require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation")
		})
	}
}

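// TestResultsCache verifies the basic caching behaviour: a repeated request
// is served from the cache, while extending the time range triggers one more
// downstream query.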
func TestResultsCache(t *testing.T) {
	calls := 0
	cfg := ResultsCacheConfig{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	rcm, _, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		cfg,
		constSplitter(day),
		mockLimits{},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)

	rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
		calls++
		return parsedResponse, nil
	}))
	ctx := user.InjectOrgID(context.Background(), "1")
	resp, err := rc.Do(ctx, parsedRequest)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing the same request again shouldn't change anything.
	resp, err = rc.Do(ctx, parsedRequest)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing a request with a new end time should do one more query.
	req := parsedRequest.WithStartEnd(parsedRequest.GetStart(), parsedRequest.GetEnd()+100)
	_, err = rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, calls)
}

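// TestResultsCacheRecent verifies that results within the max-cache-freshness
// window are never served from the cache.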
func TestResultsCacheRecent(t *testing.T) {
	var cfg ResultsCacheConfig
	flagext.DefaultValues(&cfg)
	cfg.CacheConfig.Cache = cache.NewMockCache()
	cfg.CacheQueryableSamplesStats = true
	rcm, _, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		cfg,
		constSplitter(day),
		mockLimits{maxCacheFreshness: 10 * time.Minute},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)

	req := parsedRequest.WithStartEnd(int64(model.Now())-(60*1e3), int64(model.Now()))

	calls := 0
	rc := rcm.Wrap(HandlerFunc(func(_ context.Context, r Request) (Response, error) {
		calls++
		assert.Equal(t, req, r)
		return parsedResponse, nil
	}))
	ctx := user.InjectOrgID(context.Background(), "1")

	// Request should result in a query.
	resp, err := rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing the same request again should result in another query.
	resp, err = rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, calls)
	require.Equal(t, parsedResponse, resp)
}

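// TestResultsCacheMaxFreshness verifies that the per-tenant max cache
// freshness limit controls whether a pre-filled cache entry may be used.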
func TestResultsCacheMaxFreshness(t *testing.T) {
	modelNow := model.Now()
	for i, tc := range []struct {
		fakeLimits       Limits
		Handler          HandlerFunc
		expectedResponse *PrometheusResponse
	}{
		{
			fakeLimits:       mockLimits{maxCacheFreshness: 5 * time.Second},
			Handler:          nil,
			expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10),
		},
		{
			// should not look up the cache because the per-tenant override will be applied
			fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute},
			Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
				return parsedResponse, nil
			}),
			expectedResponse: parsedResponse,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			var cfg ResultsCacheConfig
			flagext.DefaultValues(&cfg)
			cfg.CacheConfig.Cache = cache.NewMockCache()

			fakeLimits := tc.fakeLimits
			rcm, _, err := NewResultsCacheMiddleware(
				log.NewNopLogger(),
				cfg,
				constSplitter(day),
				fakeLimits,
				PrometheusCodec,
				PrometheusResponseExtractor{},
				nil,
				nil,
				nil,
			)
			require.NoError(t, err)

			// create cache with handler
			rc := rcm.Wrap(tc.Handler)
			ctx := user.InjectOrgID(context.Background(), "1")

			// create request with start/end within the key extents
			req := parsedRequest.WithStartEnd(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3))

			// fill cache
			key := constSplitter(day).GenerateCacheKey("1", req)
			rc.(*resultsCache).put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))})

			resp, err := rc.Do(ctx, req)
			require.NoError(t, err)
			require.Equal(t, tc.expectedResponse, resp)
		})
	}
}

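// Test_resultsCache_MissingData verifies that cache entries containing
// extents with a nil Response are treated as cache misses.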
func Test_resultsCache_MissingData(t *testing.T) {
	cfg := ResultsCacheConfig{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	rm, _, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		cfg,
		constSplitter(day),
		mockLimits{},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)
	rc := rm.Wrap(nil).(*resultsCache)
	ctx := context.Background()

	// fill up the cache
	rc.put(ctx, "empty", []Extent{{
		Start:    100,
		End:      200,
		Response: nil,
	}})
	rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)})
	rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), {
		Start:    120,
		End:      200,
		Response: nil,
	}})

	extents, hit := rc.get(ctx, "empty")
	require.Empty(t, extents)
	require.False(t, hit)

	extents, hit = rc.get(ctx, "notempty")
	require.Len(t, extents, 1)
	require.True(t, hit)

	extents, hit = rc.get(ctx, "mixed")
	require.Empty(t, extents)
	require.False(t, hit)
}

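// TestConstSplitter_generateCacheKey verifies that cache keys embed the
// tenant, query, step, and the start time bucketed by the split interval.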
func TestConstSplitter_generateCacheKey(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name     string
		r        Request
		interval time.Duration
		want     string
	}{
		{"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"},
		{"<30m", &PrometheusRequest{Start: toMs(10 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"},
		{"30m", &PrometheusRequest{Start: toMs(30 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:1"},
		{"91m", &PrometheusRequest{Start: toMs(91 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:3"},
		{"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"},
		{"<1d", &PrometheusRequest{Start: toMs(22 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"},
		{"4d", &PrometheusRequest{Start: toMs(4 * 24 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:4"},
		{"3d5h", &PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) {
			if got := constSplitter(tt.interval).GenerateCacheKey("fake", tt.r); got != tt.want {
				t.Errorf("GenerateCacheKey() = %v, want %v", got, tt.want)
			}
		})
	}
}

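// TestResultsCacheShouldCacheFunc verifies that a custom ShouldCacheFn can
// bypass the cache entirely, e.g. based on the request's caching options.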
func TestResultsCacheShouldCacheFunc(t *testing.T) {
	testcases := []struct {
		name         string
		shouldCache  ShouldCacheFn
		requests     []Request
		expectedCall int
	}{
		{
			name:         "normal",
			shouldCache:  nil,
			requests:     []Request{parsedRequest, parsedRequest},
			expectedCall: 1,
		},
		{
			name: "always no cache",
			shouldCache: func(r Request) bool {
				return false
			},
			requests:     []Request{parsedRequest, parsedRequest},
			expectedCall: 2,
		},
		{
			name: "check cache based on request",
			shouldCache: func(r Request) bool {
				return !r.GetCachingOptions().Disabled
			},
			requests:     []Request{noCacheRequest, noCacheRequest},
			expectedCall: 2,
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			calls := 0
			var cfg ResultsCacheConfig
			flagext.DefaultValues(&cfg)
			cfg.CacheConfig.Cache = cache.NewMockCache()
			rcm, _, err := NewResultsCacheMiddleware(
				log.NewNopLogger(),
				cfg,
				constSplitter(day),
				mockLimits{maxCacheFreshness: 10 * time.Minute},
				PrometheusCodec,
				PrometheusResponseExtractor{},
				nil,
				tc.shouldCache,
				nil,
			)
			require.NoError(t, err)
			rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
				calls++
				return parsedResponse, nil
			}))

			for _, req := range tc.requests {
				ctx := user.InjectOrgID(context.Background(), "1")
				_, err := rc.Do(ctx, req)
				require.NoError(t, err)
			}

			require.Equal(t, tc.expectedCall, calls)
		})
	}
}

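// TestNativeHistograms verifies that responses carrying native histograms are
// cached and replayed just like float samples.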
func TestNativeHistograms(t *testing.T) {
	calls := 0
	cfg := ResultsCacheConfig{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	rcm, _, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		cfg,
		constSplitter(day),
		mockLimits{},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)

	rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
		calls++
		return parsedHistogramResponse, nil
	}))
	ctx := user.InjectOrgID(context.Background(), "1")
	resp, err := rc.Do(ctx, parsedHistogramRequest)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedHistogramResponse, resp)

	// Doing the same request again shouldn't change anything.
	resp, err = rc.Do(ctx, parsedHistogramRequest)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedHistogramResponse, resp)

	// Doing a request with a new end time should do one more query.
	req := parsedHistogramRequest.WithStartEnd(parsedHistogramRequest.GetStart(), parsedHistogramRequest.GetEnd()+100)
	_, err = rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, calls)
}

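// toMs converts a duration to milliseconds.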
func toMs(t time.Duration) int64 {
	return int64(t / time.Millisecond)
}

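// mockCacheGenNumberLoader is a CacheGenNumberLoader stub that always returns
// an empty generation number.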
type mockCacheGenNumberLoader struct{}

func newMockCacheGenNumberLoader() CacheGenNumberLoader {
	return mockCacheGenNumberLoader{}
}

func (mockCacheGenNumberLoader) GetResultsCacheGenNumber(tenantIDs []string) string {
	return ""
}

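// genSampleHistogram returns the decoded form of the native histogram
// embedded in histogramResponseBody.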
func genSampleHistogram() SampleHistogram {
	return SampleHistogram{
		Count: 5,
		Sum:   18.4,
		Buckets: []*HistogramBucket{
			{
				Boundaries: 3,
				Lower:      -0.001,
				Upper:      0.001,
				Count:      2,
			},
			{
				Boundaries: 0,
				Lower:      0.7071067811865475,
				Upper:      1,
				Count:      1,
			},
			{
				Boundaries: 0,
				Lower:      1,
				Upper:      1.414213562373095,
				Count:      2,
			},
			{
				Boundaries: 0,
				Lower:      2,
				Upper:      2.82842712474619,
				Count:      1,
			},
			{
				Boundaries: 0,
				Lower:      2.82842712474619,
				Upper:      4,
				Count:      1,
			},
		},
	}
}