github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/storage/chunk/client/aws/metrics_autoscaling_test.go

package aws

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/pkg/errors"
	promV1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/mtime"

	"github.com/grafana/loki/pkg/storage/config"
	"github.com/grafana/loki/pkg/storage/stores/series/index"
)

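// write/read are the provisioned throughputs the fixtures assign to active
// tables; inactiveWrite/inactiveRead are the values used for tables outside
// their active period.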
const (
	tablePrefix      = "cortex_"
	chunkTablePrefix = "chunks_"
	tablePeriod      = 7 * 24 * time.Hour
	gracePeriod      = 15 * time.Minute
	maxChunkAge      = 12 * time.Hour
	inactiveWrite    = 1
	inactiveRead     = 2
	write            = 200
	read             = 100
)

func fixtureWriteScale() config.AutoScalingConfig {
	return config.AutoScalingConfig{
		Enabled:     true,
		MinCapacity: 100,
		MaxCapacity: 250,
		OutCooldown: 100,
		InCooldown:  100,
		TargetValue: 80.0,
	}
}

func fixtureReadScale() config.AutoScalingConfig {
	return config.AutoScalingConfig{
		Enabled:     true,
		MinCapacity: 1,
		MaxCapacity: 2000,
		OutCooldown: 100,
		InCooldown:  100,
		TargetValue: 80.0,
	}
}

func fixturePeriodicTableConfig(prefix string) config.PeriodicTableConfig {
	return config.PeriodicTableConfig{
		Prefix: prefix,
		Period: tablePeriod,
	}
}

func fixtureProvisionConfig(inactLastN int64, writeScale, inactWriteScale config.AutoScalingConfig) config.ProvisionConfig {
	return config.ProvisionConfig{
		ActiveTableProvisionConfig: config.ActiveTableProvisionConfig{
			ProvisionedWriteThroughput: write,
			ProvisionedReadThroughput:  read,
			WriteScale:                 writeScale,
		},
		InactiveTableProvisionConfig: config.InactiveTableProvisionConfig{
			InactiveWriteThroughput: inactiveWrite,
			InactiveReadThroughput:  inactiveRead,
			InactiveWriteScale:      inactWriteScale,
			InactiveWriteScaleLastN: inactLastN,
		},
	}
}

func fixtureReadProvisionConfig(readScale, inactReadScale config.AutoScalingConfig) config.ProvisionConfig {
	return config.ProvisionConfig{
		ActiveTableProvisionConfig: config.ActiveTableProvisionConfig{
			ProvisionedWriteThroughput: write,
			ProvisionedReadThroughput:  read,
			ReadScale:                  readScale,
		},
		InactiveTableProvisionConfig: config.InactiveTableProvisionConfig{
			InactiveWriteThroughput: inactiveWrite,
			InactiveReadThroughput:  inactiveRead,
			InactiveReadScale:       inactReadScale,
		},
	}
}

// nolint
func baseTable(name string, provisionedRead, provisionedWrite int64) []config.TableDesc {
	return []config.TableDesc{
		{
			Name:             name,
			ProvisionedRead:  provisionedRead,
			ProvisionedWrite: provisionedWrite,
		},
	}
}

func staticTable(i int, indexRead, indexWrite, chunkRead, chunkWrite int64) []config.TableDesc {
	return []config.TableDesc{
		{
			Name:             tablePrefix + fmt.Sprint(i),
			ProvisionedRead:  indexRead,
			ProvisionedWrite: indexWrite,
		},
		{
			Name:             chunkTablePrefix + fmt.Sprint(i),
			ProvisionedRead:  chunkRead,
			ProvisionedWrite: chunkWrite,
		},
	}
}

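// test forces the wall clock to tm via mtime.NowForce, runs one table-manager
// sync, and asserts that the resulting table provisioning matches expected.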
func test(t *testing.T, client dynamoTableClient, tableManager *index.TableManager, name string, tm time.Time, expected []config.TableDesc) {
	t.Run(name, func(t *testing.T) {
		ctx := context.Background()
		mtime.NowForce(tm)
		defer mtime.NowReset()
		if err := tableManager.SyncTables(ctx); err != nil {
			t.Fatal(err)
		}
		err := index.ExpectTables(ctx, client, expected)
		require.NoError(t, err)
	})
}

func TestTableManagerMetricsAutoScaling(t *testing.T) {
	dynamoDB := newMockDynamoDB(0, 0)
	mockProm := mockPrometheus{}

	client := dynamoTableClient{
		DynamoDB: dynamoDB,
		autoscale: &metricsData{
			promAPI: &mockProm,
			cfg: MetricsAutoScalingConfig{
				TargetQueueLen: 100000,
				ScaleUpFactor:  1.2,
			},
			tableLastUpdated: make(map[string]time.Time),
		},
		metrics: newMetrics(nil),
	}

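	// Write scaling fixtures: index tables keep the full 100-250 capacity
	// range, chunk tables get one fifth of it (20-50), and inactive tables
	// keep the 250 ceiling but may drop to a floor of 5.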
	indexWriteScale := fixtureWriteScale()
	chunkWriteScale := fixtureWriteScale()
	chunkWriteScale.MaxCapacity /= 5
	chunkWriteScale.MinCapacity /= 5
	inactiveWriteScale := fixtureWriteScale()
	inactiveWriteScale.MinCapacity = 5

	// Set up table-manager config
	cfg := config.SchemaConfig{
		Configs: []config.PeriodConfig{
			{
				IndexType: "aws-dynamo",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "a",
				},
			},
			{
				IndexType:   "aws-dynamo",
				IndexTables: fixturePeriodicTableConfig(tablePrefix),
				ChunkTables: fixturePeriodicTableConfig(chunkTablePrefix),
			},
		},
	}
	tbm := index.TableManagerConfig{
		CreationGracePeriod: gracePeriod,
		IndexTables:         fixtureProvisionConfig(2, indexWriteScale, inactiveWriteScale),
		ChunkTables:         fixtureProvisionConfig(2, chunkWriteScale, inactiveWriteScale),
	}

	tableManager, err := index.NewTableManager(tbm, cfg, maxChunkAge, client, nil, nil, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Create tables
	startTime := time.Unix(0, 0).Add(maxChunkAge).Add(gracePeriod)

	test(t, client, tableManager, "Create tables",
		startTime,
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, write, read, write)...),
	)

	mockProm.SetResponseForWrites(0, 100000, 100000, []int{0, 0}, []int{100, 20})
	test(t, client, tableManager, "Queues but no throttling",
		startTime.Add(time.Minute*10),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, write, read, write)...), // - remain flat
	)

	mockProm.SetResponseForWrites(0, 120000, 100000, []int{100, 200}, []int{100, 20})
	test(t, client, tableManager, "Shrinking queues",
		startTime.Add(time.Minute*20),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, write, read, write)...), // - remain flat
	)

	mockProm.SetResponseForWrites(0, 120000, 200000, []int{100, 0}, []int{100, 20})
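	// Queue above TargetQueueLen and growing, with throttling on the index
	// table: the expected index write capacity steps up to 240, which matches
	// write (200) * ScaleUpFactor (1.2).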
	test(t, client, tableManager, "Building queues",
		startTime.Add(time.Minute*30),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, 240, read, write)...), // - scale up index table
	)

	mockProm.SetResponseForWrites(0, 5000000, 5000000, []int{1, 0}, []int{100, 20})
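	// A further 1.2x step would be 288, which exceeds the index table's
	// MaxCapacity, so the expectation caps at 250.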
	test(t, client, tableManager, "Large queues small throttling",
		startTime.Add(time.Minute*40),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, 250, read, write)...), // - scale up index table
	)

	mockProm.SetResponseForWrites(0, 0, 0, []int{0, 0}, []int{120, 40})
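	// No queue and no throttling: write capacity is expected to scale down
	// towards usage / TargetValue (80%), i.e. 120/0.8 = 150 and 40/0.8 = 50.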
	test(t, client, tableManager, "No queues no throttling",
		startTime.Add(time.Minute*100),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, 150, read, 50)...), // - scale down both tables
	)

	mockProm.SetResponseForWrites(0, 0, 0, []int{0, 0}, []int{50, 10})
	test(t, client, tableManager, "in cooldown period",
		startTime.Add(time.Minute*101),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, 150, read, 50)...), // - no change; in cooldown period
	)

	mockProm.SetResponseForWrites(0, 0, 0, []int{0, 0}, []int{90, 10})
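	// 90/0.8 truncates to 112; 10/0.8 = 12.5 would fall below the chunk
	// table's MinCapacity (20), which matches the clamp to 20 expected here.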
	test(t, client, tableManager, "No queues no throttling",
		startTime.Add(time.Minute*200),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, 112, read, 20)...), // - scale down both again
	)

	mockProm.SetResponseForWrites(0, 0, 0, []int{0, 0}, []int{50, 10})
	test(t, client, tableManager, "de minimis change",
		startTime.Add(time.Minute*220),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, 112, read, 20)...), // - should see no change
	)

	mockProm.SetResponseForWrites(0, 0, 0, []int{30, 30, 30, 30}, []int{50, 10, 100, 20})
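	// From here each rates slice carries four values: index and chunk for the
	// week-0 tables, then index and chunk for the newly created week-1 tables.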
	test(t, client, tableManager, "Next week",
		startTime.Add(tablePeriod),
		// Nothing much happening - expect table 0 write rates to stay as-is and table 1 to be created with defaults
		append(append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, inactiveRead, 112, inactiveRead, 20)...),
			staticTable(1, read, write, read, write)...),
	)

	// No throttling on last week's index table, still some on chunk table
	mockProm.SetResponseForWrites(0, 0, 0, []int{0, 30, 30, 30}, []int{10, 2, 100, 20})
	test(t, client, tableManager, "Next week plus a bit",
		startTime.Add(tablePeriod).Add(time.Minute*10),
		append(append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, inactiveRead, 12, inactiveRead, 20)...), // Scale back last week's index table
			staticTable(1, read, write, read, write)...),
	)

	// No throttling on last week's tables but some queueing
	mockProm.SetResponseForWrites(20000, 20000, 20000, []int{0, 0, 1, 1}, []int{0, 0, 100, 20})
	test(t, client, tableManager, "Next week plus a bit",
		startTime.Add(tablePeriod).Add(time.Minute*20),
		append(append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, inactiveRead, 12, inactiveRead, 20)...), // no scaling back
			staticTable(1, read, write, read, write)...),
	)

	mockProm.SetResponseForWrites(120000, 130000, 140000, []int{0, 0, 1, 0}, []int{0, 0, 100, 20})
	test(t, client, tableManager, "next week, queues building, throttling on index table",
		startTime.Add(tablePeriod).Add(time.Minute*30),
		append(append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, inactiveRead, 12, inactiveRead, 20)...), // no scaling back
			staticTable(1, read, 240, read, write)...), // scale up index table
	)

	mockProm.SetResponseForWrites(140000, 130000, 120000, []int{0, 0, 1, 0}, []int{0, 0, 100, 20})
	test(t, client, tableManager, "next week, queues shrinking, throttling on index table",
		startTime.Add(tablePeriod).Add(time.Minute*40),
		append(append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, inactiveRead, 5, inactiveRead, 5)...), // scale right back
			staticTable(1, read, 240, read, 25)...), // scale chunk table to usage/80%
	)
}

func TestTableManagerMetricsReadAutoScaling(t *testing.T) {
	dynamoDB := newMockDynamoDB(0, 0)
	mockProm := mockPrometheus{}

	client := dynamoTableClient{
		DynamoDB: dynamoDB,
		autoscale: &metricsData{
			promAPI: &mockProm,
			cfg: MetricsAutoScalingConfig{
				TargetQueueLen: 100000,
				ScaleUpFactor:  1.2,
			},
			tableLastUpdated:     make(map[string]time.Time),
			tableReadLastUpdated: make(map[string]time.Time),
		},
		metrics: newMetrics(nil),
	}

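	// Read scaling fixtures: active tables may scale between 1 and 2000 read
	// capacity units; inactive tables keep a floor of 5.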
	indexReadScale := fixtureReadScale()
	chunkReadScale := fixtureReadScale()
	inactiveReadScale := fixtureReadScale()
	inactiveReadScale.MinCapacity = 5

	// Set up table-manager config
	cfg := config.SchemaConfig{
		Configs: []config.PeriodConfig{
			{
				IndexType: "aws-dynamo",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "a",
				},
			},
			{
				IndexType:   "aws-dynamo",
				IndexTables: fixturePeriodicTableConfig(tablePrefix),
				ChunkTables: fixturePeriodicTableConfig(chunkTablePrefix),
			},
		},
	}
	tbm := index.TableManagerConfig{
		CreationGracePeriod: gracePeriod,
		IndexTables:         fixtureReadProvisionConfig(indexReadScale, inactiveReadScale),
		ChunkTables:         fixtureReadProvisionConfig(chunkReadScale, inactiveReadScale),
	}

	tableManager, err := index.NewTableManager(tbm, cfg, maxChunkAge, client, nil, nil, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Create tables
	startTime := time.Unix(0, 0).Add(maxChunkAge).Add(gracePeriod)

	test(t, client, tableManager, "Create tables",
		startTime,
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, read, write, read, write)...),
	)

	mockProm.SetResponseForReads([][]int{{0, 0}}, [][]int{{0, 0}})
	test(t, client, tableManager, "No Query Usage",
		startTime.Add(time.Minute*10),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, 1, write, 1, write)...), // - no read usage, so read capacity drops to the minimum
	)

	mockProm.SetResponseForReads([][]int{{10, 10}}, [][]int{{0, 0}})
	test(t, client, tableManager, "Query Usage but no errors",
		startTime.Add(time.Minute*20),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, 201, write, 201, write)...), // - less than 10% of max ... scale read on both
	)

	mockProm.SetResponseForReads([][]int{{11, 11}}, [][]int{{20, 0}})
	test(t, client, tableManager, "Query Usage and throttling on index",
		startTime.Add(time.Minute*30),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, 401, write, 201, write)...), // - scale up index table read
	)

	mockProm.SetResponseForReads([][]int{{12, 12}}, [][]int{{20, 20}})
	test(t, client, tableManager, "Query Usage and throttling on index plus chunk",
		startTime.Add(time.Minute*40),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, 601, write, 401, write)...), // - scale up index more and scale chunk a step
	)

	mockProm.SetResponseForReads([][]int{{13, 13}}, [][]int{{200, 200}})
	test(t, client, tableManager, "in cooldown period",
		startTime.Add(time.Minute*41),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, 601, write, 401, write)...), // - no change; in cooldown period
	)

	mockProm.SetResponseForReads([][]int{{13, 13}}, [][]int{{0, 0}})
	test(t, client, tableManager, "Sustained Query Usage",
		startTime.Add(time.Minute*100),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, 601, write, 401, write)...), // - errors have stopped, but usage continues so no scaling
	)

	mockProm.SetResponseForReads([][]int{{0, 0}}, [][]int{{0, 0}})
	test(t, client, tableManager, "Query Usage has ended",
		startTime.Add(time.Minute*200),
		append(baseTable("a", inactiveRead, inactiveWrite),
			staticTable(0, 1, write, 1, write)...), // - scale down to minimum... no usage at all
	)
}

// Helper to return pre-canned results for Prometheus queries
type mockPrometheus struct {
	promV1.API
	rangeValues []model.Value
}

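// SetResponseForWrites primes the canned QueryRange results consumed during a
// write-scaling sync: a queue-length series (q0, q1, q2), then one matrix per
// rates slice in throttleRates (each slice alternating index-table and
// chunk-table values per table index), followed by zero-valued matrices of
// the same shape standing in for the usage and usage-error queries.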
func (m *mockPrometheus) SetResponseForWrites(q0, q1, q2 model.SampleValue, throttleRates ...[]int) {
	// Mock metrics from Prometheus
	m.rangeValues = []model.Value{
		// Queue lengths
		model.Matrix{
			&model.SampleStream{Values: []model.SamplePair{
				{Timestamp: 0, Value: q0},
				{Timestamp: 15000, Value: q1},
				{Timestamp: 30000, Value: q2},
			}},
		},
	}
	for _, rates := range throttleRates {
		throttleMatrix := model.Matrix{}
		for i := 0; i < len(rates)/2; i++ {
			throttleMatrix = append(throttleMatrix,
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", tablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(rates[i*2])}},
				},
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", chunkTablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(rates[i*2+1])}},
				})
		}
		m.rangeValues = append(m.rangeValues, throttleMatrix)
	}
	// stub response for usage queries (not used in write tests)
	for _, rates := range throttleRates {
		readUsageMatrix := model.Matrix{}
		for i := 0; i < len(rates)/2; i++ {
			readUsageMatrix = append(readUsageMatrix,
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", tablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: 0}},
				},
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", chunkTablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: 0}},
				})
		}
		m.rangeValues = append(m.rangeValues, readUsageMatrix)
	}
	// stub response for usage error queries (not used in write tests)
	for _, rates := range throttleRates {
		readErrorMatrix := model.Matrix{}
		for i := 0; i < len(rates)/2; i++ {
			readErrorMatrix = append(readErrorMatrix,
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", tablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: 0}},
				},
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", chunkTablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: 0}},
				})
		}
		m.rangeValues = append(m.rangeValues, readErrorMatrix)
	}
}

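// SetResponseForReads primes the canned QueryRange results consumed during a
// read-scaling sync: a zero queue-length series and zero-valued write-side
// matrices as filler, then per-table read-usage matrices built from usageRates
// and read throttle-error matrices built from errorRates, each slice
// alternating index-table and chunk-table values per table index.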
func (m *mockPrometheus) SetResponseForReads(usageRates [][]int, errorRates [][]int) {
	// Mock metrics from Prometheus. The write-side values aren't used in read tests,
	// but they must be filled in so the underlying functions receive the expected
	// number of Prometheus results.
	m.rangeValues = []model.Value{
		// Queue lengths (not used)
		model.Matrix{
			&model.SampleStream{Values: []model.SamplePair{
				{Timestamp: 0, Value: 0},
				{Timestamp: 15000, Value: 0},
				{Timestamp: 30000, Value: 0},
			}},
		},
	}
	// Write error rates; not used in a read test, included only as filler for the expected number of Prometheus responses
	for _, rates := range errorRates {
		errorMatrix := model.Matrix{}
		for i := 0; i < len(rates)/2; i++ {
			errorMatrix = append(errorMatrix,
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", tablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(0)}},
				},
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", chunkTablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(0)}},
				})
		}
		m.rangeValues = append(m.rangeValues, errorMatrix)
	}
	// Write usage rates; not used in a read test, included only as filler for the expected number
	// of Prometheus responses (only the count matters, so errorRates is reused here)
	for _, rates := range errorRates {
		errorMatrix := model.Matrix{}
		for i := 0; i < len(rates)/2; i++ {
			errorMatrix = append(errorMatrix,
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", tablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(0)}},
				},
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", chunkTablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(0)}},
				})
		}
		m.rangeValues = append(m.rangeValues, errorMatrix)
	}
	// read usage metrics per table
	for _, rates := range usageRates {
		readUsageMatrix := model.Matrix{}
		for i := 0; i < len(rates)/2; i++ {
			readUsageMatrix = append(readUsageMatrix,
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", tablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(rates[i*2])}},
				},
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", chunkTablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(rates[i*2+1])}},
				})
		}
		m.rangeValues = append(m.rangeValues, readUsageMatrix)
	}
	// errors from read throttling, per table
	for _, rates := range errorRates {
		readErrorMatrix := model.Matrix{}
		for i := 0; i < len(rates)/2; i++ {
			readErrorMatrix = append(readErrorMatrix,
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", tablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(rates[i*2])}},
				},
				&model.SampleStream{
					Metric: model.Metric{"table": model.LabelValue(fmt.Sprintf("%s%d", chunkTablePrefix, i))},
					Values: []model.SamplePair{{Timestamp: 30000, Value: model.SampleValue(rates[i*2+1])}},
				})
		}
		m.rangeValues = append(m.rangeValues, readErrorMatrix)
	}
}

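// QueryRange ignores the query and range and simply pops the next canned
// value in FIFO order, erroring once the fixtures are exhausted.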
func (m *mockPrometheus) QueryRange(ctx context.Context, query string, r promV1.Range) (model.Value, promV1.Warnings, error) {
	if len(m.rangeValues) == 0 {
		return nil, nil, errors.New("mockPrometheus.QueryRange: out of values")
	}
	// Take the first value and move the slice up
	ret := m.rangeValues[0]
	m.rangeValues = m.rangeValues[1:]
	return ret, nil, nil
}