github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/pkg/querier/queryrange/results_cache_test.go

package queryrange

import (
	"context"
	"fmt"
	"strconv"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/gogo/protobuf/types"
	"github.com/grafana/dskit/flagext"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/user"

	"github.com/cortexproject/cortex/pkg/chunk/cache"
	"github.com/cortexproject/cortex/pkg/cortexpb"
)

const (
	query        = "/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&step=120"
	responseBody = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}`
)

var (
	parsedRequest = &PrometheusRequest{
		Path:  "/api/v1/query_range",
		Start: 1536673680 * 1e3,
		End:   1536716898 * 1e3,
		Step:  120 * 1e3,
		Query: "sum(container_memory_rss) by (namespace)",
	}
	noCacheRequest = &PrometheusRequest{
		Path:           "/api/v1/query_range",
		Start:          1536673680 * 1e3,
		End:            1536716898 * 1e3,
		Step:           120 * 1e3,
		Query:          "sum(container_memory_rss) by (namespace)",
		CachingOptions: CachingOptions{Disabled: true},
	}
	respHeaders = []*PrometheusResponseHeader{
		{
			Name:   "Content-Type",
			Values: []string{"application/json"},
		},
	}
	parsedResponse = &PrometheusResponse{
		Status: "success",
		Data: PrometheusData{
			ResultType: model.ValMatrix.String(),
			Result: []SampleStream{
				{
					Labels: []cortexpb.LabelAdapter{
						{Name: "foo", Value: "bar"},
					},
					Samples: []cortexpb.Sample{
						{Value: 137, TimestampMs: 1536673680000},
						{Value: 137, TimestampMs: 1536673780000},
					},
				},
			},
		},
	}
)

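// mkAPIResponse builds a successful matrix response with a single series
// ({foo="bar"}) containing one sample per step in [start, end].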
func mkAPIResponse(start, end, step int64) *PrometheusResponse {
	var samples []cortexpb.Sample
	for i := start; i <= end; i += step {
		samples = append(samples, cortexpb.Sample{
			TimestampMs: i,
			Value:       float64(i),
		})
	}

	return &PrometheusResponse{
		Status: StatusSuccess,
		Data: PrometheusData{
			ResultType: matrix,
			Result: []SampleStream{
				{
					Labels: []cortexpb.LabelAdapter{
						{Name: "foo", Value: "bar"},
					},
					Samples: samples,
				},
			},
		},
	}
}

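// mkExtent builds a cached extent covering [start, end] with a default step of 10.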
func mkExtent(start, end int64) Extent {
	return mkExtentWithStep(start, end, 10)
}

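// mkExtentWithStep builds a cached extent whose Response is the result of
// mkAPIResponse over the same range, marshaled into a protobuf Any.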
func mkExtentWithStep(start, end, step int64) Extent {
	res := mkAPIResponse(start, end, step)
	any, err := types.MarshalAny(res)
	if err != nil {
		panic(err)
	}
	return Extent{
		Start:    start,
		End:      end,
		Response: any,
	}
}

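// TestShouldCache exercises shouldCacheResponse. Responses are cacheable by
// default; a Cache-Control: no-store header, an inconsistent cache generation
// number, or an @ modifier pointing beyond the request end (or beyond
// maxCacheTime) must each disable caching.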
func TestShouldCache(t *testing.T) {
	maxCacheTime := int64(150 * 1000)
	c := &resultsCache{logger: log.NewNopLogger(), cacheGenNumberLoader: newMockCacheGenNumberLoader()}
	for _, tc := range []struct {
		name                   string
		request                Request
		input                  Response
		cacheGenNumberToInject string
		expected               bool
	}{
		// Tests only for cacheControlHeader
		{
			name:    "does not contain the cacheControl header",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			expected: true,
		},
		{
			name:    "does contain the cacheControl header which has the value",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{noStoreValue},
					},
				},
			}),
			expected: false,
		},
		{
			name:    "cacheControl header contains extra values but still good",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{"foo", noStoreValue},
					},
				},
			}),
			expected: false,
		},
		{
			name:     "broken response",
			request:  &PrometheusRequest{Query: "metric"},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:    "nil headers",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{nil},
			}),
			expected: true,
		},
		{
			name:    "had cacheControl header but no values",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{{Name: cacheControlHeader}},
			}),
			expected: true,
		},

		// Tests only for cacheGenNumber header
		{
			name:    "cacheGenNumber not set in both header and store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			expected: true,
		},
		{
			name:    "cacheGenNumber set in store but not in header",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   "meaninglessheader",
						Values: []string{},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		{
			name:    "cacheGenNumber set in header but not in store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1"},
					},
				},
			}),
			expected: false,
		},
		{
			name:    "cacheGenNumber in header and store are the same",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "1"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               true,
		},
		{
			name:    "inconsistency between cacheGenNumber in header and store",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "2"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		{
   253  			name:    "cacheControl header says not to catch and cacheGenNumbers in store and headers have consistency",
			request: &PrometheusRequest{Query: "metric"},
			input: Response(&PrometheusResponse{
				Headers: []*PrometheusResponseHeader{
					{
						Name:   cacheControlHeader,
						Values: []string{noStoreValue},
					},
					{
						Name:   ResultsCacheGenNumberHeaderName,
						Values: []string{"1", "1"},
					},
				},
			}),
			cacheGenNumberToInject: "1",
			expected:               false,
		},
		// @ modifier on vector selectors.
		{
			name:     "@ modifier on vector selector, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 123", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on vector selector, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 127", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 151", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ 151", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on vector selector with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ start()", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on vector selector with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "metric @ end()", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		// @ modifier on matrix selectors.
		{
			name:     "@ modifier on matrix selector, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 123)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on matrix selector, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 127)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on matrix selector with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ start())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on matrix selector with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "rate(metric[5m] @ end())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		// @ modifier on subqueries.
		{
			name:     "@ modifier on subqueries, before end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 123)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on subqueries, after end, before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 127)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries, before end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries, after end, after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 125000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
		{
			name:     "@ modifier on subqueries with start() before maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ start())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: true,
		},
		{
			name:     "@ modifier on subqueries with end() after maxCacheTime",
			request:  &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ end())", Start: 100000, End: 200000},
			input:    Response(&PrometheusResponse{}),
			expected: false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			ctx := cache.InjectCacheGenNumber(context.Background(), tc.cacheGenNumberToInject)
			ret := c.shouldCacheResponse(ctx, tc.request, tc.input, maxCacheTime)
			require.Equal(t, tc.expected, ret)
		})
	}
}

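// TestPartition verifies that partition splits a request into the sub-requests
// that still have to be executed downstream and the responses that can be
// served from cached extents, ignoring extents smaller than minCacheExtent.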
func TestPartition(t *testing.T) {
	for _, tc := range []struct {
		name                   string
		input                  Request
		prevCachedResponse     []Extent
		expectedRequests       []Request
		expectedCachedResponse []Response
	}{
		{
			name: "Test a complete hit.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(0, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(0, 100, 10),
			},
		},

		{
			name: "Test with a complete miss.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(110, 210),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 0,
					End:   100,
				}},
		},
		{
			name: "Test a partial hit.",
			input: &PrometheusRequest{
				Start: 0,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 100),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 0,
					End:   50,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(50, 100, 10),
			},
		},
		{
			name: "Test multiple partial hits.",
			input: &PrometheusRequest{
				Start: 100,
				End:   200,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(160, 250),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 120,
					End:   160,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
				mkAPIResponse(160, 200, 10),
			},
		},
		{
			name: "Partial hits with tiny gap.",
			input: &PrometheusRequest{
				Start: 100,
				End:   160,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 120),
				mkExtent(122, 130),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 120,
					End:   160,
				},
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 120, 10),
			},
		},
		{
			name: "Extent is outside the range and the request has a single step (same start and end).",
			input: &PrometheusRequest{
				Start: 100,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(50, 90),
			},
			expectedRequests: []Request{
				&PrometheusRequest{
					Start: 100,
					End:   100,
				},
			},
		},
		{
			name: "Test when hit has a large step and only a single sample extent.",
			// If there is only a single sample in the split interval, start and end will be the same.
			input: &PrometheusRequest{
				Start: 100,
				End:   100,
			},
			prevCachedResponse: []Extent{
				mkExtent(100, 100),
			},
			expectedCachedResponse: []Response{
				mkAPIResponse(100, 105, 10),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			s := resultsCache{
				extractor:      PrometheusResponseExtractor{},
				minCacheExtent: 10,
			}
			reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
			require.Nil(t, err)
			require.Equal(t, tc.expectedRequests, reqs)
			require.Equal(t, tc.expectedCachedResponse, resps)
		})
	}
}

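// TestHandleHit verifies that handleHit fetches only the missing ranges from
// the downstream handler and merges them back into the cached extents,
// extending, coalescing, or dropping extents as needed.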
func TestHandleHit(t *testing.T) {
	for _, tc := range []struct {
		name                       string
		input                      Request
		cachedEntry                []Extent
		expectedUpdatedCachedEntry []Extent
	}{
		{
			name: "Should drop tiny extent that overlaps with non-tiny request only",
			input: &PrometheusRequest{
				Start: 100,
				End:   120,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 150, 5),
				mkExtentWithStep(160, 165, 5),
			},
		},
		{
   562  			name: "Should replace tiny extents that are cover by bigger request",
			input: &PrometheusRequest{
				Start: 100,
				End:   200,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(110, 115, 5),
				mkExtentWithStep(120, 125, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 200, 5),
				mkExtentWithStep(220, 225, 5),
				mkExtentWithStep(240, 250, 5),
			},
		},
		{
			name: "Should not drop tiny extent that completely overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   105,
				Step:  5,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 50, 5),
				mkExtentWithStep(60, 65, 5),
				mkExtentWithStep(100, 105, 5),
				mkExtentWithStep(160, 165, 5),
			},
			expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled using cache
		},
		{
			name: "Should not drop tiny extent that partially center-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 106,
				End:   108,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled using cache
		},
		{
			name: "Should not drop tiny extent that partially left-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   106,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(104, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(100, 110, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should not drop tiny extent that partially right-overlaps with tiny request",
			input: &PrometheusRequest{
				Start: 100,
				End:   106,
				Step:  2,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 102, 2),
				mkExtentWithStep(160, 166, 2),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 64, 2),
				mkExtentWithStep(98, 106, 2),
				mkExtentWithStep(160, 166, 2),
			},
		},
		{
			name: "Should merge fragmented extents if request fills the hole",
			input: &PrometheusRequest{
				Start: 40,
				End:   80,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(0, 20, 20),
				mkExtentWithStep(80, 100, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(0, 100, 20),
			},
		},
		{
			name: "Should left-extend extent if request starts earlier than extent in cache",
			input: &PrometheusRequest{
				Start: 40,
				End:   80,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(40, 160, 20),
			},
		},
		{
			name: "Should right-extend extent if request ends later than extent in cache",
			input: &PrometheusRequest{
				Start: 100,
				End:   180,
				Step:  20,
			},
			cachedEntry: []Extent{
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
		{
			name: "Should not throw error if complete-overlapped smaller Extent is erroneous",
			input: &PrometheusRequest{
				// This request is carefully crafted so that the cachedEntry is not used
				// to fulfill the request.
				Start: 160,
				End:   180,
				Step:  20,
			},
			cachedEntry: []Extent{
				{
					Start: 60,
					End:   80,

					// Without the optimization of sorting extents by End when their Starts
					// are equal, this nil response would cause an error during the extent
					// merge phase. With the optimization, this bad extent is dropped and
					// the good extent below is used instead.
					Response: nil,
				},
				mkExtentWithStep(60, 160, 20),
			},
			expectedUpdatedCachedEntry: []Extent{
				mkExtentWithStep(60, 180, 20),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			sut := resultsCache{
				extractor:      PrometheusResponseExtractor{},
				minCacheExtent: 10,
				limits:         mockLimits{},
				merger:         PrometheusCodec,
				next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
					return mkAPIResponse(req.GetStart(), req.GetEnd(), req.GetStep()), nil
				}),
			}

			ctx := user.InjectOrgID(context.Background(), "1")
			response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0)
			require.NoError(t, err)

			expectedResponse := mkAPIResponse(tc.input.GetStart(), tc.input.GetEnd(), tc.input.GetStep())
			require.Equal(t, expectedResponse, response, "response does not match the expectation")
			require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation")
		})
	}
}

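// TestResultsCache covers the basic middleware flow: the first request hits
// the downstream handler, an identical request is served from cache, and
// extending the end time triggers exactly one more downstream call.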
func TestResultsCache(t *testing.T) {
	calls := 0
	cfg := ResultsCacheConfig{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	rcm, _, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		cfg,
		constSplitter(day),
		mockLimits{},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)

	rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
		calls++
		return parsedResponse, nil
	}))
	ctx := user.InjectOrgID(context.Background(), "1")
	resp, err := rc.Do(ctx, parsedRequest)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing the same request again shouldn't change anything.
	resp, err = rc.Do(ctx, parsedRequest)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing a request with a new end time should do one more query.
	req := parsedRequest.WithStartEnd(parsedRequest.GetStart(), parsedRequest.GetEnd()+100)
	_, err = rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, calls)
}

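// TestResultsCacheRecent verifies that results within the max cache freshness
// window are not cached: repeating a query over the most recent minute hits
// the downstream handler every time.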
func TestResultsCacheRecent(t *testing.T) {
	var cfg ResultsCacheConfig
	flagext.DefaultValues(&cfg)
	cfg.CacheConfig.Cache = cache.NewMockCache()
	rcm, _, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		cfg,
		constSplitter(day),
		mockLimits{maxCacheFreshness: 10 * time.Minute},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)

	req := parsedRequest.WithStartEnd(int64(model.Now())-(60*1e3), int64(model.Now()))

	calls := 0
	rc := rcm.Wrap(HandlerFunc(func(_ context.Context, r Request) (Response, error) {
		calls++
		assert.Equal(t, r, req)
		return parsedResponse, nil
	}))
	ctx := user.InjectOrgID(context.Background(), "1")

	// Request should result in a query.
	resp, err := rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 1, calls)
	require.Equal(t, parsedResponse, resp)

	// Doing the same request again should result in another query.
	resp, err = rc.Do(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 2, calls)
	require.Equal(t, parsedResponse, resp)
}

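// TestResultsCacheMaxFreshness verifies that the per-tenant max cache
// freshness limit decides whether a pre-filled cache entry may be used or the
// request must go to the downstream handler.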
func TestResultsCacheMaxFreshness(t *testing.T) {
	modelNow := model.Now()
	for i, tc := range []struct {
		fakeLimits       Limits
		Handler          HandlerFunc
		expectedResponse *PrometheusResponse
	}{
		{
			fakeLimits:       mockLimits{maxCacheFreshness: 5 * time.Second},
			Handler:          nil,
			expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10),
		},
		{
			// should not look up the cache because the per-tenant override will be applied
			fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute},
			Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
				return parsedResponse, nil
			}),
			expectedResponse: parsedResponse,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			var cfg ResultsCacheConfig
			flagext.DefaultValues(&cfg)
			cfg.CacheConfig.Cache = cache.NewMockCache()

			fakeLimits := tc.fakeLimits
			rcm, _, err := NewResultsCacheMiddleware(
				log.NewNopLogger(),
				cfg,
				constSplitter(day),
				fakeLimits,
				PrometheusCodec,
				PrometheusResponseExtractor{},
				nil,
				nil,
				nil,
			)
			require.NoError(t, err)

			// create the cache with the test case's handler
			rc := rcm.Wrap(tc.Handler)
			ctx := user.InjectOrgID(context.Background(), "1")

			// create a request with start/end within the cached extents
			req := parsedRequest.WithStartEnd(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3))

			// fill cache
			key := constSplitter(day).GenerateCacheKey("1", req)
			rc.(*resultsCache).put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))})

			resp, err := rc.Do(ctx, req)
			require.NoError(t, err)
			require.Equal(t, tc.expectedResponse, resp)
		})
	}
}

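// Test_resultsCache_MissingData verifies that get treats an extent with a nil
// Response as missing data, invalidating the whole cache entry.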
func Test_resultsCache_MissingData(t *testing.T) {
	cfg := ResultsCacheConfig{
		CacheConfig: cache.Config{
			Cache: cache.NewMockCache(),
		},
	}
	rm, _, err := NewResultsCacheMiddleware(
		log.NewNopLogger(),
		cfg,
		constSplitter(day),
		mockLimits{},
		PrometheusCodec,
		PrometheusResponseExtractor{},
		nil,
		nil,
		nil,
	)
	require.NoError(t, err)
	rc := rm.Wrap(nil).(*resultsCache)
	ctx := context.Background()

	// fill up the cache
	rc.put(ctx, "empty", []Extent{{
		Start:    100,
		End:      200,
		Response: nil,
	}})
	rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)})
	rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), {
		Start:    120,
		End:      200,
		Response: nil,
	}})

	extents, hit := rc.get(ctx, "empty")
	require.Empty(t, extents)
	require.False(t, hit)

	extents, hit = rc.get(ctx, "notempty")
	require.Equal(t, len(extents), 1)
	require.True(t, hit)

	extents, hit = rc.get(ctx, "mixed")
	require.Equal(t, len(extents), 0)
	require.False(t, hit)
}

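// TestConstSplitter_generateCacheKey checks that cache keys are composed of
// the tenant ID, query, step, and the start time bucketed by the split
// interval.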
func TestConstSplitter_generateCacheKey(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name     string
		r        Request
		interval time.Duration
		want     string
	}{
		{"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"},
		{"<30m", &PrometheusRequest{Start: toMs(10 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"},
		{"30m", &PrometheusRequest{Start: toMs(30 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:1"},
		{"91m", &PrometheusRequest{Start: toMs(91 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:3"},
		{"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"},
		{"<1d", &PrometheusRequest{Start: toMs(22 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"},
		{"4d", &PrometheusRequest{Start: toMs(4 * 24 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:4"},
		{"3d5h", &PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) {
			if got := constSplitter(tt.interval).GenerateCacheKey("fake", tt.r); got != tt.want {
				t.Errorf("generateKey() = %v, want %v", got, tt.want)
			}
		})
	}
}

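// TestResultsCacheShouldCacheFunc verifies that a custom ShouldCacheFn can
// bypass the cache entirely, either unconditionally or based on the request's
// caching options.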
func TestResultsCacheShouldCacheFunc(t *testing.T) {
	testcases := []struct {
		name         string
		shouldCache  ShouldCacheFn
		requests     []Request
		expectedCall int
	}{
		{
			name:         "normal",
			shouldCache:  nil,
			requests:     []Request{parsedRequest, parsedRequest},
			expectedCall: 1,
		},
		{
			name: "always no cache",
			shouldCache: func(r Request) bool {
				return false
			},
			requests:     []Request{parsedRequest, parsedRequest},
			expectedCall: 2,
		},
		{
			name: "check cache based on request",
			shouldCache: func(r Request) bool {
				return !r.GetCachingOptions().Disabled
			},
			requests:     []Request{noCacheRequest, noCacheRequest},
			expectedCall: 2,
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			calls := 0
			var cfg ResultsCacheConfig
			flagext.DefaultValues(&cfg)
			cfg.CacheConfig.Cache = cache.NewMockCache()
			rcm, _, err := NewResultsCacheMiddleware(
				log.NewNopLogger(),
				cfg,
				constSplitter(day),
				mockLimits{maxCacheFreshness: 10 * time.Minute},
				PrometheusCodec,
				PrometheusResponseExtractor{},
				nil,
				tc.shouldCache,
				nil,
			)
			require.NoError(t, err)
			rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
				calls++
				return parsedResponse, nil
			}))

			for _, req := range tc.requests {
				ctx := user.InjectOrgID(context.Background(), "1")
				_, err := rc.Do(ctx, req)
				require.NoError(t, err)
			}

			require.Equal(t, tc.expectedCall, calls)
		})
	}
}

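// toMs converts a duration to milliseconds, the unit used for request
// timestamps in this package.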
func toMs(t time.Duration) int64 {
	return int64(t / time.Millisecond)
}

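// mockCacheGenNumberLoader is a stub CacheGenNumberLoader that always returns
// an empty cache generation number.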
type mockCacheGenNumberLoader struct{}

func newMockCacheGenNumberLoader() CacheGenNumberLoader {
	return mockCacheGenNumberLoader{}
}

func (mockCacheGenNumberLoader) GetResultsCacheGenNumber(tenantIDs []string) string {
	return ""
}