github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/querier/queryrange/queryrangebase/results_cache_test.go

package queryrangebase

import (
	"context"
	"fmt"
	"strconv"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/gogo/protobuf/types"
	"github.com/grafana/dskit/flagext"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/user"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logqlmodel/stats"
	"github.com/grafana/loki/pkg/storage/chunk/cache"
)

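// Shared fixtures used throughout the results cache tests.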
const (
	query        = "/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&step=120"
	responseBody = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}`
)

var (
	parsedRequest = &PrometheusRequest{
		Path:  "/api/v1/query_range",
		Start: 1536673680 * 1e3,
		End:   1536716898 * 1e3,
		Step:  120 * 1e3,
		Query: "sum(container_memory_rss) by (namespace)",
	}
	reqHeaders = []*PrometheusRequestHeader{
		{
			Name:   "Test-Header",
			Values: []string{"test"},
		},
	}
	noCacheRequest = &PrometheusRequest{
		Path:           "/api/v1/query_range",
		Start:          1536673680 * 1e3,
		End:            1536716898 * 1e3,
		Step:           120 * 1e3,
		Query:          "sum(container_memory_rss) by (namespace)",
		CachingOptions: CachingOptions{Disabled: true},
	}
	respHeaders = []*PrometheusResponseHeader{
		{
			Name:   "Content-Type",
			Values: []string{"application/json"},
		},
	}
	parsedResponse = &PrometheusResponse{
		Status: "success",
		Data: PrometheusData{
			ResultType: model.ValMatrix.String(),
			Result: []SampleStream{
				{
					Labels: []logproto.LabelAdapter{
						{Name: "foo", Value: "bar"},
					},
					Samples: []logproto.LegacySample{
						{Value: 137, TimestampMs: 1536673680000},
						{Value: 137, TimestampMs: 1536673780000},
					},
				},
			},
		},
	}
)

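// mkAPIResponse builds a successful matrix response with a single series
// labelled {foo="bar"}, containing one sample every step milliseconds from
// start through end (inclusive).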
func mkAPIResponse(start, end, step int64) *PrometheusResponse {
	var samples []logproto.LegacySample
	for i := start; i <= end; i += step {
		samples = append(samples, logproto.LegacySample{
			TimestampMs: i,
			Value:       float64(i),
		})
	}

	return &PrometheusResponse{
		Status: StatusSuccess,
		Data: PrometheusData{
			ResultType: matrix,
			Result: []SampleStream{
				{
					Labels: []logproto.LabelAdapter{
						{Name: "foo", Value: "bar"},
					},
					Samples: samples,
				},
			},
		},
	}
}

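// mkExtent builds a cached extent covering [start, end] with a default step of 10ms.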
func mkExtent(start, end int64) Extent {
	return mkExtentWithStep(start, end, 10)
}

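// mkExtentWithStep builds a cached extent covering [start, end], marshalling
// a synthetic response into the extent's payload.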
func mkExtentWithStep(start, end, step int64) Extent {
	res := mkAPIResponse(start, end, step)
	any, err := types.MarshalAny(res)
	if err != nil {
		panic(err)
	}
	return Extent{
		Start:    start,
		End:      end,
		Response: any,
	}
}

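// TestShouldCache exercises shouldCacheResponse against Cache-Control
// headers, cache generation numbers, and @ modifiers in queries.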
func TestShouldCache(t *testing.T) {
	maxCacheTime := int64(150 * 1000)
	c := &resultsCache{logger: log.NewNopLogger(), cacheGenNumberLoader: newMockCacheGenNumberLoader()}
	for _, tc := range []struct {
		name                   string
		request                Request
		input                  Response
		cacheGenNumberToInject string
		expected               bool
	}{
		// Tests only for cacheControlHeader
		{
			name:    "does not contain the cacheControl header",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			expected: true,
		},
		{
			name:    "contains the cacheControl header with the no-store value",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{noStoreValue},
					},
				},
			}),
			expected: false,
		},
		{
			name:    "cacheControl header contains extra values but still good",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{"foo", noStoreValue},
					},
				},
			}),
			expected: false,
		},
		{
			name:     "broken response",
			request:  &PrometheusRequest{Query: "metric"},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:    "nil headers",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{nil},
			}),
			expected: true,
		},
		{
			name:    "has the cacheControl header but no values",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{{Name: cacheControlHeader}},
			}),
			expected: true,
		},

		// Tests only for cacheGenNumber header
		{
			name:    "cacheGenNumber set in neither header nor store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			expected: true,
		},
		{
			name:    "cacheGenNumber set in store but not in header",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		{
			name:    "cacheGenNumber set in header but not in store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1"},
					},
				},
			}),
			expected: false,
		},
		{
			name:    "cacheGenNumber in header and store are the same",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "1"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               true,
		},
		{
			name:    "inconsistency between cacheGenNumber in header and store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "2"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		{
			name:    "cacheControl header says not to cache and cacheGenNumbers in store and headers are consistent",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{noStoreValue},
					},
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "1"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		// @ modifier on vector selectors.
		{
			name:     "@ modifier on vector selector, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 123", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on vector selector, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 127", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 151", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 151", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ start()", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on vector selector with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ end()", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		// @ modifier on matrix selectors.
		{
			name:     "@ modifier on matrix selector, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 123)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on matrix selector, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 127)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ start())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on matrix selector with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ end())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		// @ modifier on subqueries.
		{
			name:     "@ modifier on subqueries, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 123)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on subqueries, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 127)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ start())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on subqueries with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ end())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			ctx := cache.InjectCacheGenNumber(context.Background(), tc.cacheGenNumberToInject)
			ret := c.shouldCacheResponse(ctx, tc.request, tc.input, maxCacheTime)
			require.Equal(t, tc.expected, ret)
		})
	}
}

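// TestPartition verifies how a request is split into the sub-requests that
// still need to be executed and the sub-responses that can be served from
// cached extents.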
func TestPartition(t *testing.T) {
	for _, tc := range []struct {
		name                   string
		input                  Request
		prevCachedResponse     []Extent
		expectedRequests       []Request
		expectedCachedResponse []Response
	}{
		{
			name: "Test a complete hit.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(0, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(0, 100, 10),
			},
		},

		{
			name: "Test with a complete miss.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(110, 210),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 0,
					End:   100,
				},
			},
		},
		{
			name: "Test a partial hit.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 100),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 0,
					End:   50,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(50, 100, 10),
			},
		},
		{
			name: "Test multiple partial hits.",
			input: &PrometheusRequest{
				Start: 100,
				End:   200,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(160, 250),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 120,
					End:   160,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
				mkAPIResponse(160, 200, 10),
			},
		},
		{
			name: "Partial hits with tiny gap.",
			input: &PrometheusRequest{
				Start: 100,
				End:   160,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(122, 130),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 120,
					End:   160,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
			},
		},
		{
			name: "Extent is outside the range and the request has a single step (same start and end).",
			input: &PrometheusRequest{
				Start: 100,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 90),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 100,
					End:   100,
				},
			},
		},
		{
			name: "Test when hit has a large step and only a single sample extent.",
			// If there is only a single sample in the split interval, start and end will be the same.
			input: &PrometheusRequest{
				Start: 100,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(100, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 105, 10),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			s := resultsCache{
				extractor:      PrometheusResponseExtractor{},
				minCacheExtent: 10,
			}
			reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
			require.NoError(t, err)
			require.Equal(t, tc.expectedRequests, reqs)
			require.Equal(t, tc.expectedCachedResponse, resps)
		})
	}
}

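// TestHandleHit verifies that handleHit merges cached extents with the
// results of follow-up requests, dropping, merging, or extending extents as
// needed.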
func TestHandleHit(t *testing.T) {
	for _, tc := range []struct {
		name                       string
		input                      Request
		cachedEntry                []Extent
		expectedUpdatedCachedEntry []Extent
	}{
		{
			name: "Should drop tiny extent that overlaps with non-tiny request only",
			input: &PrometheusRequest{
				Start: 100,
				End:   120,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
		},
		{
			name: "Should replace tiny extents that are covered by a bigger request",
			input: &PrometheusRequest{
				Start: 100,
				End:   200,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 115, 5),
				mkExtentWithStep(120, 125, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 200, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
		},
		{
			name: "Should not drop tiny extent that completely overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   105,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled from cache
		},
		{
			name: "Should not drop tiny extent that partially center-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 106,
				End:   108,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled from cache
		},
		{
			name: "Should not drop tiny extent that partially left-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   106,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(100, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should not drop tiny extent that partially right-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   106,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 102, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 106, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should merge fragmented extents if request fills the hole",
			input: &PrometheusRequest{
				Start: 40,
				End:   80,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 20, 20),
				mkExtentWithStep(80, 100, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 100, 20),
			},
		},
		{
			name: "Should left-extend extent if request starts earlier than extent in cache",
			input: &PrometheusRequest{
				Start: 40,
				End:   80,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(40, 160, 20),
			},
		},
		{
			name: "Should right-extend extent if request ends later than extent in cache",
			input: &PrometheusRequest{
				Start: 100,
				End:   180,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
		{
			name: "Should not return an error if a completely-overlapped smaller Extent is erroneous",
			input: &PrometheusRequest{
				// This request is carefully crafted such that cachedEntry is not used to fulfill
				// the request.
				Start: 160,
				End:   180,
				Step:  20,
			},
			cachedEntry: []Extent{
				{
					Start: 60,
					End:   80,

					// Without the optimization of sorting by End when the Start of two Extents is
					// equal, this nil response would cause an error during the extent merge phase.
					// With the optimization this bad Extent is dropped and the good Extent below
					// can be used instead.
					Response: nil,
				},
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			sut := resultsCache{
				extractor:      PrometheusResponseExtractor{},
				minCacheExtent: 10,
				limits:         mockLimits{},
				merger:         PrometheusCodec,
				next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
					return mkAPIResponse(req.GetStart(), req.GetEnd(), req.GetStep()), nil
				}),
			}

			ctx := user.InjectOrgID(context.Background(), "1")
			response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0)
			require.NoError(t, err)

			expectedResponse := mkAPIResponse(tc.input.GetStart(), tc.input.GetEnd(), tc.input.GetStep())
			require.Equal(t, expectedResponse, response, "response does not match the expectation")
			require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation")
		})
	}
}

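// TestResultsCache runs the middleware end-to-end against a mock cache:
// repeated identical requests should be served from cache, while extending
// the time range should trigger exactly one more downstream query.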
func TestResultsCache(t *testing.T) {
	calls := 0
	cfg := ResultsCacheConfig{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache)
	require.NoError(t, err)
	rcm, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		c,
		constSplitter(day),
		mockLimits{},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)

	rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
		calls++
		return parsedResponse, nil
	}))
	ctx := user.InjectOrgID(context.Background(), "1")
	resp, err := rc.Do(ctx, parsedRequest)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing the same request again shouldn't change anything.
	resp, err = rc.Do(ctx, parsedRequest)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing the request with a new end time should trigger one more downstream query.
	req := parsedRequest.WithStartEnd(parsedRequest.GetStart(), parsedRequest.GetEnd()+100)
	_, err = rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, calls)
}

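// TestResultsCacheRecent checks that requests within the max-freshness
// window bypass the cache: every request must reach the downstream handler.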
func TestResultsCacheRecent(t *testing.T) {
	var cfg ResultsCacheConfig
	flagext.DefaultValues(&cfg)
	cfg.CacheConfig.Cache = cache.NewMockCache()
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache)
	require.NoError(t, err)
	rcm, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		c,
		constSplitter(day),
		mockLimits{maxCacheFreshness: 10 * time.Minute},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)

	req := parsedRequest.WithStartEnd(int64(model.Now())-(60*1e3), int64(model.Now()))

	calls := 0
	rc := rcm.Wrap(HandlerFunc(func(_ context.Context, r Request) (Response, error) {
		calls++
		assert.Equal(t, r, req)
		return parsedResponse, nil
	}))
	ctx := user.InjectOrgID(context.Background(), "1")

	// Request should result in a query.
	resp, err := rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing the same request again should result in another query.
	resp, err = rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, calls)
	require.Equal(t, parsedResponse, resp)
}

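// TestResultsCacheMaxFreshness checks that the per-tenant max cache
// freshness setting controls whether a pre-filled cache entry is used.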
func TestResultsCacheMaxFreshness(t *testing.T) {
	modelNow := model.Now()
	for i, tc := range []struct {
		fakeLimits       Limits
		Handler          HandlerFunc
		expectedResponse *PrometheusResponse
	}{
		{
			fakeLimits:       mockLimits{maxCacheFreshness: 5 * time.Second},
			Handler:          nil,
			expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10),
		},
		{
			// should not look up the cache because the per-tenant override will be applied
			fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute},
			Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
				return parsedResponse, nil
			}),
			expectedResponse: parsedResponse,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			var cfg ResultsCacheConfig
			flagext.DefaultValues(&cfg)
			cfg.CacheConfig.Cache = cache.NewMockCache()
			c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache)
			require.NoError(t, err)
			fakeLimits := tc.fakeLimits
			rcm, err := NewResultsCacheMiddleware(
				log.NewNopLogger(),
				c,
				constSplitter(day),
				fakeLimits,
				PrometheusCodec,
				PrometheusResponseExtractor{},
				nil,
				nil,
				nil,
			)
			require.NoError(t, err)

			// wrap the handler with the results cache
			rc := rcm.Wrap(tc.Handler)
			ctx := user.InjectOrgID(context.Background(), "1")

			// create a request whose start and end fall within the cached extent
			req := parsedRequest.WithStartEnd(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3))

			// fill cache
			key := constSplitter(day).GenerateCacheKey("1", req)
			rc.(*resultsCache).put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))})

			resp, err := rc.Do(ctx, req)
			require.NoError(t, err)
			require.Equal(t, tc.expectedResponse, resp)
		})
	}
}

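// Test_resultsCache_MissingData verifies that extents with a nil response
// are treated as cache misses rather than returned to the caller.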
func Test_resultsCache_MissingData(t *testing.T) {
	cfg := ResultsCacheConfig{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache)
	require.NoError(t, err)
	rm, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		c,
		constSplitter(day),
		mockLimits{},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)
	rc := rm.Wrap(nil).(*resultsCache)
	ctx := context.Background()

	// fill up the cache
	rc.put(ctx, "empty", []Extent{{
		Start:    100,
		End:      200,
		Response: nil,
	}})
	rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)})
	rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), {
		Start:    120,
		End:      200,
		Response: nil,
	}})

	extents, hit := rc.get(ctx, "empty")
	require.Empty(t, extents)
	require.False(t, hit)

	extents, hit = rc.get(ctx, "notempty")
	require.Equal(t, 1, len(extents))
	require.True(t, hit)

	extents, hit = rc.get(ctx, "mixed")
	require.Equal(t, 0, len(extents))
	require.False(t, hit)
}

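// toMs converts a duration to milliseconds, the unit used for request
// start/end timestamps.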
func toMs(t time.Duration) int64 {
	return t.Milliseconds()
}

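// TestConstSplitter_generateCacheKey checks that cache keys bucket the
// request start time by the splitter's interval.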
func TestConstSplitter_generateCacheKey(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name     string
		r        Request
		interval time.Duration
		want     string
	}{
		{"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"},
		{"<30m", &PrometheusRequest{Start: toMs(10 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"},
		{"30m", &PrometheusRequest{Start: toMs(30 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:1"},
		{"91m", &PrometheusRequest{Start: toMs(91 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:3"},
		{"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"},
		{"<1d", &PrometheusRequest{Start: toMs(22 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"},
		{"4d", &PrometheusRequest{Start: toMs(4 * 24 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:4"},
		{"3d5h", &PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) {
			if got := constSplitter(tt.interval).GenerateCacheKey("fake", tt.r); got != tt.want {
				t.Errorf("GenerateCacheKey() = %v, want %v", got, tt.want)
			}
		})
	}
}

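// TestResultsCacheShouldCacheFunc checks that a custom ShouldCacheFn can
// bypass the cache globally or per request.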
func TestResultsCacheShouldCacheFunc(t *testing.T) {
	testcases := []struct {
		name         string
		shouldCache  ShouldCacheFn
		requests     []Request
		expectedCall int
	}{
		{
			name:         "normal",
			shouldCache:  nil,
			requests:     []Request{parsedRequest, parsedRequest},
			expectedCall: 1,
		},
		{
			name: "always no cache",
			shouldCache: func(r Request) bool {
				return false
			},
			requests:     []Request{parsedRequest, parsedRequest},
			expectedCall: 2,
		},
		{
			name: "check cache based on request",
			shouldCache: func(r Request) bool {
				return !r.GetCachingOptions().Disabled
			},
			requests:     []Request{noCacheRequest, noCacheRequest},
			expectedCall: 2,
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			calls := 0
			var cfg ResultsCacheConfig
			flagext.DefaultValues(&cfg)
			cfg.CacheConfig.Cache = cache.NewMockCache()
			c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache)
			require.NoError(t, err)
			rcm, err := NewResultsCacheMiddleware(
				log.NewNopLogger(),
				c,
				constSplitter(day),
				mockLimits{maxCacheFreshness: 10 * time.Minute},
				PrometheusCodec,
				PrometheusResponseExtractor{},
				nil,
				tc.shouldCache,
				nil,
			)
			require.NoError(t, err)
			rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
				calls++
				return parsedResponse, nil
			}))

			for _, req := range tc.requests {
				ctx := user.InjectOrgID(context.Background(), "1")
				_, err := rc.Do(ctx, req)
				require.NoError(t, err)
			}

			require.Equal(t, tc.expectedCall, calls)
		})
	}
}

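// mockCacheGenNumberLoader is a stub CacheGenNumberLoader that always
// returns an empty generation number.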
type mockCacheGenNumberLoader struct{}

func newMockCacheGenNumberLoader() CacheGenNumberLoader {
	return mockCacheGenNumberLoader{}
}

func (mockCacheGenNumberLoader) GetResultsCacheGenNumber(tenantIDs []string) string {
	return ""
}