github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/pkg/chunk/purger/purger_test.go

package purger

import (
	"context"
	"fmt"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/grafana/dskit/flagext"
	"github.com/grafana/dskit/services"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/stretchr/testify/require"

	"github.com/cortexproject/cortex/pkg/chunk"
	"github.com/cortexproject/cortex/pkg/chunk/testutils"
	util_log "github.com/cortexproject/cortex/pkg/util/log"
	"github.com/cortexproject/cortex/pkg/util/test"
)

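// Test fixtures shared across the tests below: a fixed user ID and model.Time
// equivalents of one day and one hour.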
const (
	userID        = "userID"
	modelTimeDay  = model.Time(millisecondPerDay)
	modelTimeHour = model.Time(time.Hour / time.Millisecond)
)

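// setupTestDeleteStore creates a DeleteStore backed by mock storage, provisioning its
// tables through a table manager.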
func setupTestDeleteStore(t *testing.T) *DeleteStore {
	var (
		deleteStoreConfig DeleteStoreConfig
		tbmConfig         chunk.TableManagerConfig
		schemaCfg         = chunk.DefaultSchemaConfig("", "v10", 0)
	)
	flagext.DefaultValues(&deleteStoreConfig)
	flagext.DefaultValues(&tbmConfig)

	mockStorage := chunk.NewMockStorage()

	extraTables := []chunk.ExtraTables{{TableClient: mockStorage, Tables: deleteStoreConfig.GetTables()}}
	tableManager, err := chunk.NewTableManager(tbmConfig, schemaCfg, 12*time.Hour, mockStorage, nil, extraTables, nil)
	require.NoError(t, err)

	require.NoError(t, tableManager.SyncTables(context.Background()))

	deleteStore, err := NewDeleteStore(deleteStoreConfig, mockStorage)
	require.NoError(t, err)

	return deleteStore
}

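// setupStoresAndPurger builds the delete store, chunk store, object store and a Purger
// on top of them, along with the registry the purger metrics are registered on.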
func setupStoresAndPurger(t *testing.T) (*DeleteStore, chunk.Store, chunk.ObjectClient, *Purger, *prometheus.Registry) {
	deleteStore := setupTestDeleteStore(t)

	chunkStore, err := testutils.SetupTestChunkStore()
	require.NoError(t, err)

	storageClient, err := testutils.SetupTestObjectStore()
	require.NoError(t, err)

	purger, registry := setupPurger(t, deleteStore, chunkStore, storageClient)

	return deleteStore, chunkStore, storageClient, purger, registry
}

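// setupPurger creates a Purger with default config over the given stores and returns it
// together with its Prometheus registry.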
func setupPurger(t *testing.T, deleteStore *DeleteStore, chunkStore chunk.Store, storageClient chunk.ObjectClient) (*Purger, *prometheus.Registry) {
	registry := prometheus.NewRegistry()

	var cfg Config
	flagext.DefaultValues(&cfg)

	purger, err := NewPurger(cfg, deleteStore, chunkStore, storageClient, registry)
	require.NoError(t, err)

	return purger, registry
}

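// buildChunks creates batchSize chunks for every hour in the [from, through) range.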
func buildChunks(from, through model.Time, batchSize int) ([]chunk.Chunk, error) {
	var chunks []chunk.Chunk
	for ; from < through; from = from.Add(time.Hour) {
		// create batchSize number of chunks per hour
		_, testChunks, err := testutils.CreateChunks(0, batchSize, from, from.Add(time.Hour))
		if err != nil {
			return nil, err
		}

		chunks = append(chunks, testChunks...)
	}

	return chunks, nil
}

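// purgePlanTestCases describes delete requests against chunk stores holding different
// amounts of data, together with the expected number of delete plans, the number of
// chunks selected for deletion, and the partial-deletion intervals expected on the
// first and last affected chunks.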
var purgePlanTestCases = []struct {
	name                              string
	chunkStoreDataInterval            model.Interval
	deleteRequestInterval             model.Interval
	expectedNumberOfPlans             int
	numChunksToDelete                 int
	firstChunkPartialDeletionInterval *Interval
	lastChunkPartialDeletionInterval  *Interval
	batchSize                         int
}{
	{
		name:                   "deleting whole hour from one hour of data",
		chunkStoreDataInterval: model.Interval{End: modelTimeHour},
		deleteRequestInterval:  model.Interval{End: modelTimeHour},
		expectedNumberOfPlans:  1,
		numChunksToDelete:      1,
	},
	{
		name:                   "deleting half a day from a day's data",
		chunkStoreDataInterval: model.Interval{End: modelTimeDay},
		deleteRequestInterval:  model.Interval{End: model.Time(millisecondPerDay / 2)},
		expectedNumberOfPlans:  1,
		numChunksToDelete:      12 + 1, // one chunk for each hour + end time touches chunk at boundary
		lastChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(millisecondPerDay / 2),
			EndTimestampMs: int64(millisecondPerDay / 2)},
	},
	{
		name:                   "deleting a full day from 2 days' data",
		chunkStoreDataInterval: model.Interval{End: modelTimeDay * 2},
		deleteRequestInterval:  model.Interval{End: modelTimeDay},
		expectedNumberOfPlans:  1,
		numChunksToDelete:      24 + 1, // one chunk for each hour + end time touches chunk at boundary
		lastChunkPartialDeletionInterval: &Interval{StartTimestampMs: millisecondPerDay,
			EndTimestampMs: millisecondPerDay},
	},
	{
		name:                   "deleting 2 days partially from 2 days' data",
		chunkStoreDataInterval: model.Interval{End: modelTimeDay * 2},
		deleteRequestInterval: model.Interval{Start: model.Time(millisecondPerDay / 2),
			End: model.Time(millisecondPerDay + millisecondPerDay/2)},
		expectedNumberOfPlans: 2,
		numChunksToDelete:     24 + 2, // one chunk for each hour + start and end times touch chunks at boundary
		firstChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(millisecondPerDay / 2),
			EndTimestampMs: int64(millisecondPerDay / 2)},
		lastChunkPartialDeletionInterval: &Interval{StartTimestampMs: millisecondPerDay + millisecondPerDay/2,
			EndTimestampMs: millisecondPerDay + millisecondPerDay/2},
	},
	{
		name:                   "deleting 2 days partially, not aligned with hour, from 2 days' data",
		chunkStoreDataInterval: model.Interval{End: modelTimeDay * 2},
		deleteRequestInterval: model.Interval{Start: model.Time(millisecondPerDay / 2).Add(time.Minute),
			End: model.Time(millisecondPerDay + millisecondPerDay/2).Add(-time.Minute)},
		expectedNumberOfPlans: 2,
		numChunksToDelete:     24, // one chunk for each hour, no chunks touched at boundary
		firstChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(model.Time(millisecondPerDay / 2).Add(time.Minute)),
			EndTimestampMs: int64(model.Time(millisecondPerDay / 2).Add(time.Hour))},
		lastChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(model.Time(millisecondPerDay + millisecondPerDay/2).Add(-time.Hour)),
			EndTimestampMs: int64(model.Time(millisecondPerDay + millisecondPerDay/2).Add(-time.Minute))},
	},
	{
		name:                   "deleting data outside the period of existing data",
		chunkStoreDataInterval: model.Interval{End: modelTimeDay},
		deleteRequestInterval:  model.Interval{Start: model.Time(millisecondPerDay * 2), End: model.Time(millisecondPerDay * 3)},
		expectedNumberOfPlans:  1,
		numChunksToDelete:      0,
	},
	{
		name:                   "building multi-day chunk and deleting part of it from first day",
		chunkStoreDataInterval: model.Interval{Start: modelTimeDay.Add(-30 * time.Minute), End: modelTimeDay.Add(30 * time.Minute)},
		deleteRequestInterval:  model.Interval{Start: modelTimeDay.Add(-30 * time.Minute), End: modelTimeDay.Add(-15 * time.Minute)},
		expectedNumberOfPlans:  1,
		numChunksToDelete:      1,
		firstChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(modelTimeDay.Add(-30 * time.Minute)),
			EndTimestampMs: int64(modelTimeDay.Add(-15 * time.Minute))},
	},
	{
		name:                   "building multi-day chunk and deleting part of it for each day",
		chunkStoreDataInterval: model.Interval{Start: modelTimeDay.Add(-30 * time.Minute), End: modelTimeDay.Add(30 * time.Minute)},
		deleteRequestInterval:  model.Interval{Start: modelTimeDay.Add(-15 * time.Minute), End: modelTimeDay.Add(15 * time.Minute)},
		expectedNumberOfPlans:  2,
		numChunksToDelete:      1,
		firstChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(modelTimeDay.Add(-15 * time.Minute)),
			EndTimestampMs: int64(modelTimeDay.Add(15 * time.Minute))},
	},
}

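// TestPurger_BuildPlan verifies that buildDeletePlan stores the expected number of
// delete plans and records the right partial-deletion intervals for chunks at the
// boundaries of the delete request.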
func TestPurger_BuildPlan(t *testing.T) {
	for _, tc := range purgePlanTestCases {
		for batchSize := 1; batchSize <= 5; batchSize++ {
			t.Run(fmt.Sprintf("%s/batch-size=%d", tc.name, batchSize), func(t *testing.T) {
				deleteStore, chunkStore, storageClient, purger, _ := setupStoresAndPurger(t)
				defer func() {
					purger.StopAsync()
					chunkStore.Stop()
				}()

				chunks, err := buildChunks(tc.chunkStoreDataInterval.Start, tc.chunkStoreDataInterval.End, batchSize)
				require.NoError(t, err)

				require.NoError(t, chunkStore.Put(context.Background(), chunks))

				err = deleteStore.AddDeleteRequest(context.Background(), userID, tc.deleteRequestInterval.Start,
					tc.deleteRequestInterval.End, []string{"foo"})
				require.NoError(t, err)

				deleteRequests, err := deleteStore.GetAllDeleteRequestsForUser(context.Background(), userID)
				require.NoError(t, err)

				deleteRequest := deleteRequests[0]
				requestWithLogger := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger)

				err = purger.buildDeletePlan(requestWithLogger)
				require.NoError(t, err)
				planPath := fmt.Sprintf("%s:%s/", userID, deleteRequest.RequestID)

				plans, _, err := storageClient.List(context.Background(), planPath, "/")
				require.NoError(t, err)
				require.Equal(t, tc.expectedNumberOfPlans, len(plans))

				numPlans := tc.expectedNumberOfPlans
				var nilPurgePlanInterval *Interval
				numChunks := 0

				chunkIDs := map[string]struct{}{}

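				// Load every stored plan and verify the partial-deletion interval recorded for each
				// chunk, expecting intervals only at the boundaries of the delete request.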
				for i := range plans {
					deletePlan, err := purger.getDeletePlan(context.Background(), userID, deleteRequest.RequestID, i)
					require.NoError(t, err)
					for _, chunksGroup := range deletePlan.ChunksGroup {
						numChunksInGroup := len(chunksGroup.Chunks)
						chunks := chunksGroup.Chunks
						numChunks += numChunksInGroup

						sort.Slice(chunks, func(i, j int) bool {
							chunkI, err := chunk.ParseExternalKey(userID, chunks[i].ID)
							require.NoError(t, err)

							chunkJ, err := chunk.ParseExternalKey(userID, chunks[j].ID)
							require.NoError(t, err)

							return chunkI.From < chunkJ.From
						})

						for j, chunkDetails := range chunksGroup.Chunks {
							chunkIDs[chunkDetails.ID] = struct{}{}
							if i == 0 && j == 0 && tc.firstChunkPartialDeletionInterval != nil {
								require.Equal(t, *tc.firstChunkPartialDeletionInterval, *chunkDetails.PartiallyDeletedInterval)
							} else if i == numPlans-1 && j == numChunksInGroup-1 && tc.lastChunkPartialDeletionInterval != nil {
								require.Equal(t, *tc.lastChunkPartialDeletionInterval, *chunkDetails.PartiallyDeletedInterval)
							} else {
								require.Equal(t, nilPurgePlanInterval, chunkDetails.PartiallyDeletedInterval)
							}
						}
					}
				}

				require.Equal(t, tc.numChunksToDelete*batchSize, len(chunkIDs))
				require.Equal(t, float64(tc.numChunksToDelete*batchSize), testutil.ToFloat64(purger.metrics.deleteRequestsChunksSelectedTotal))
			})
		}
	}
}

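// TestPurger_ExecutePlan builds and executes the delete plans for each test case and
// checks that only the chunks outside the deleted interval remain in the store.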
func TestPurger_ExecutePlan(t *testing.T) {
	fooMetricNameMatcher, err := parser.ParseMetricSelector(`foo`)
	if err != nil {
		t.Fatal(err)
	}

	for _, tc := range purgePlanTestCases {
		for batchSize := 1; batchSize <= 5; batchSize++ {
			t.Run(fmt.Sprintf("%s/batch-size=%d", tc.name, batchSize), func(t *testing.T) {
				deleteStore, chunkStore, _, purger, _ := setupStoresAndPurger(t)
				defer func() {
					purger.StopAsync()
					chunkStore.Stop()
				}()

				chunks, err := buildChunks(tc.chunkStoreDataInterval.Start, tc.chunkStoreDataInterval.End, batchSize)
				require.NoError(t, err)

				require.NoError(t, chunkStore.Put(context.Background(), chunks))

				// calculate the number of chunks expected to be in the store before deletion
				chunkStoreDataIntervalTotal := tc.chunkStoreDataInterval.End - tc.chunkStoreDataInterval.Start
				numChunksExpected := int(chunkStoreDataIntervalTotal / model.Time(time.Hour/time.Millisecond))

				// check that the store actually has the expected number of chunks
				chunks, err = chunkStore.Get(context.Background(), userID, tc.chunkStoreDataInterval.Start, tc.chunkStoreDataInterval.End, fooMetricNameMatcher...)
				require.NoError(t, err)
				require.Equal(t, numChunksExpected*batchSize, len(chunks))

				// request deletion of the chunks
				err = deleteStore.AddDeleteRequest(context.Background(), userID, tc.deleteRequestInterval.Start,
					tc.deleteRequestInterval.End, []string{"foo"})
				require.NoError(t, err)

				// get the delete request
				deleteRequests, err := deleteStore.GetAllDeleteRequestsForUser(context.Background(), userID)
				require.NoError(t, err)

				deleteRequest := deleteRequests[0]
				requestWithLogger := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger)
				err = purger.buildDeletePlan(requestWithLogger)
				require.NoError(t, err)

				// execute all the plans
				for i := 0; i < tc.expectedNumberOfPlans; i++ {
					err := purger.executePlan(userID, deleteRequest.RequestID, i, requestWithLogger.logger)
					require.NoError(t, err)
				}

				// calculate the number of chunks expected to remain in the store after deletion
				numChunksExpectedAfterDeletion := 0
				for chunkStart := tc.chunkStoreDataInterval.Start; chunkStart < tc.chunkStoreDataInterval.End; chunkStart += modelTimeHour {
					numChunksExpectedAfterDeletion += len(getNonDeletedIntervals(model.Interval{Start: chunkStart, End: chunkStart + modelTimeHour}, tc.deleteRequestInterval))
				}

				// check that the store actually has the expected number of chunks
				chunks, err = chunkStore.Get(context.Background(), userID, tc.chunkStoreDataInterval.Start, tc.chunkStoreDataInterval.End, fooMetricNameMatcher...)
				require.NoError(t, err)
				require.Equal(t, numChunksExpectedAfterDeletion*batchSize, len(chunks))
			})
		}
	}
}

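// TestPurger_Restarts checks that a newly started Purger picks up a delete request whose
// plan was built by a previous instance and processes it to completion.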
func TestPurger_Restarts(t *testing.T) {
	fooMetricNameMatcher, err := parser.ParseMetricSelector(`foo`)
	if err != nil {
		t.Fatal(err)
	}

	deleteStore, chunkStore, storageClient, purger, _ := setupStoresAndPurger(t)
	defer func() {
		chunkStore.Stop()
	}()

	chunks, err := buildChunks(0, model.Time(0).Add(10*24*time.Hour), 1)
	require.NoError(t, err)

	require.NoError(t, chunkStore.Put(context.Background(), chunks))

	// request deletion of 7 of the 10 days of chunks
	err = deleteStore.AddDeleteRequest(context.Background(), userID, model.Time(0).Add(24*time.Hour),
		model.Time(0).Add(8*24*time.Hour), []string{"foo"})
	require.NoError(t, err)

	// get the delete request
	deleteRequests, err := deleteStore.GetAllDeleteRequestsForUser(context.Background(), userID)
	require.NoError(t, err)

	deleteRequest := deleteRequests[0]
	requestWithLogger := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger)
	err = purger.buildDeletePlan(requestWithLogger)
	require.NoError(t, err)

	// stop the existing purger
	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), purger))

	// create a new purger to check whether it picks up in-process delete requests
	newPurger, _ := setupPurger(t, deleteStore, chunkStore, storageClient)

	// load in-process delete requests by starting the service
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), newPurger))

	defer newPurger.StopAsync()

	test.Poll(t, time.Minute, 0, func() interface{} {
		return newPurger.inProcessRequests.len()
	})

	// check whether data got deleted from the store now that the delete request has been processed
	chunks, err = chunkStore.Get(context.Background(), userID, 0, model.Time(0).Add(10*24*time.Hour), fooMetricNameMatcher...)
	require.NoError(t, err)

	// we are deleting 7 days out of 10, so there should be 3 days of data left in the store, which means 72 chunks
	require.Equal(t, 72, len(chunks))

	deleteRequests, err = deleteStore.GetAllDeleteRequestsForUser(context.Background(), userID)
	require.NoError(t, err)
	require.Equal(t, StatusProcessed, deleteRequests[0].Status)

	require.Equal(t, float64(1), testutil.ToFloat64(newPurger.metrics.deleteRequestsProcessedTotal))
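	// no failures were recorded, so the failures metric collects no samples yet and
	// testutil.ToFloat64 panics instead of returning a value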
	require.PanicsWithError(t, "collected 0 metrics instead of exactly 1", func() {
		testutil.ToFloat64(newPurger.metrics.deleteRequestsProcessingFailures)
	})
}

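// TestPurger_Metrics checks the pending delete request metrics: requests only count as
// pending once their cancellation period has passed, and both metrics drop back to zero
// after the purger has processed them.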
func TestPurger_Metrics(t *testing.T) {
	deleteStore, chunkStore, storageClient, purger, registry := setupStoresAndPurger(t)
	defer func() {
		purger.StopAsync()
		chunkStore.Stop()
	}()

	// add delete requests without starting the purger loops that load and process delete requests.
	// add a delete request whose createdAt is now
	err := deleteStore.AddDeleteRequest(context.Background(), userID, model.Time(0).Add(24*time.Hour),
		model.Time(0).Add(2*24*time.Hour), []string{"foo"})
	require.NoError(t, err)

	// add a delete request whose createdAt is 2 days back
	err = deleteStore.addDeleteRequest(context.Background(), userID, model.Now().Add(-2*24*time.Hour), model.Time(0).Add(24*time.Hour),
		model.Time(0).Add(2*24*time.Hour), []string{"foo"})
	require.NoError(t, err)

	// add a delete request whose createdAt is 3 days back
	err = deleteStore.addDeleteRequest(context.Background(), userID, model.Now().Add(-3*24*time.Hour), model.Time(0).Add(24*time.Hour),
		model.Time(0).Add(8*24*time.Hour), []string{"foo"})
	require.NoError(t, err)

	// load new delete requests for processing
	require.NoError(t, purger.pullDeleteRequestsToPlanDeletes())

	// there must be 2 pending delete requests, the oldest being 2 days old since its cancellation period is over
	require.InDelta(t, float64(2*86400), testutil.ToFloat64(purger.metrics.oldestPendingDeleteRequestAgeSeconds), 1)
	require.Equal(t, float64(2), testutil.ToFloat64(purger.metrics.pendingDeleteRequestsCount))

	// stop the existing purger
	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), purger))

	// create a new purger
	purger, registry = setupPurger(t, deleteStore, chunkStore, storageClient)

	// load in-process delete requests by starting the service
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), purger))

	defer purger.StopAsync()

	// wait until cortex_purger_delete_requests_processed_total starts to show up.
	test.Poll(t, 2*time.Second, 1, func() interface{} {
		count, err := testutil.GatherAndCount(registry, "cortex_purger_delete_requests_processed_total")
		require.NoError(t, err)
		return count
	})

	// wait until both of the pending delete requests are processed.
	test.Poll(t, 2*time.Second, float64(2), func() interface{} {
		return testutil.ToFloat64(purger.metrics.deleteRequestsProcessedTotal)
	})

	// wait until the oldest pending request age becomes 0
	test.Poll(t, 2*time.Second, float64(0), func() interface{} {
		return testutil.ToFloat64(purger.metrics.oldestPendingDeleteRequestAgeSeconds)
	})

	// wait until the pending delete requests count becomes 0
	test.Poll(t, 2*time.Second, float64(0), func() interface{} {
		return testutil.ToFloat64(purger.metrics.pendingDeleteRequestsCount)
	})
}

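// TestPurger_retryFailedRequests makes plan building and then plan execution fail by
// switching the backing mock stores to read-only mode, and checks that retryFailedRequests
// eventually completes the delete request once writes are allowed again.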
func TestPurger_retryFailedRequests(t *testing.T) {
	// set up the chunk store
	indexMockStorage := chunk.NewMockStorage()
	chunksMockStorage := chunk.NewMockStorage()

	deleteStore := setupTestDeleteStore(t)
	chunkStore, err := testutils.SetupTestChunkStoreWithClients(indexMockStorage, chunksMockStorage, indexMockStorage)
	require.NoError(t, err)

	// create a purger instance
	purgerMockStorage := chunk.NewMockStorage()
	purger, _ := setupPurger(t, deleteStore, chunkStore, purgerMockStorage)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), purger))

	defer func() {
		require.NoError(t, services.StopAndAwaitTerminated(context.Background(), purger))
	}()

	// add some chunks
	chunks, err := buildChunks(0, model.Time(0).Add(3*24*time.Hour), 1)
	require.NoError(t, err)

	require.NoError(t, chunkStore.Put(context.Background(), chunks))

	// add a request to delete some chunks
	err = deleteStore.addDeleteRequest(context.Background(), userID, model.Now().Add(-25*time.Hour), model.Time(0).Add(24*time.Hour),
		model.Time(0).Add(2*24*time.Hour), []string{"foo"})
	require.NoError(t, err)

	// change purgerMockStorage to allow only reads. This makes writing plans to storage fail and hence fails the plan-building operation.
	purgerMockStorage.SetMode(chunk.MockStorageModeReadOnly)

	// pull requests to process and ensure that the operation fails.
	err = purger.pullDeleteRequestsToPlanDeletes()
	require.Error(t, err)
	require.True(t, strings.Contains(err.Error(), "permission denied"))

	// there must be 1 delete request in process and the userID must be in the failed requests list.
	require.NotNil(t, purger.inProcessRequests.get(userID))
	require.Len(t, purger.inProcessRequests.listUsersWithFailedRequest(), 1)

	// now allow writes to purgerMockStorage so that building plans succeeds.
	purgerMockStorage.SetMode(chunk.MockStorageModeReadWrite)

	// but change the mode of chunksMockStorage to read-only, which denies permission to delete any chunks and in turn
	// fails execution of the delete plans.
	chunksMockStorage.SetMode(chunk.MockStorageModeReadOnly)

	// retry processing of failed requests
	purger.retryFailedRequests()

	// the delete request status should now change to StatusDeleting since building the plan should have succeeded.
	test.Poll(t, time.Second, StatusDeleting, func() interface{} {
		return purger.inProcessRequests.get(userID).Status
	})
	// the request should have failed again since we did not give permission to delete chunks.
	test.Poll(t, time.Second, 1, func() interface{} {
		return len(purger.inProcessRequests.listUsersWithFailedRequest())
	})

	// now allow writes to chunksMockStorage so the requests do not fail anymore.
	chunksMockStorage.SetMode(chunk.MockStorageModeReadWrite)

	// retry processing of failed requests.
	purger.retryFailedRequests()
	// there must be no in-process requests anymore.
	test.Poll(t, time.Second, true, func() interface{} {
		return purger.inProcessRequests.get(userID) == nil
	})
	// there must be no users with failed requests.
	require.Len(t, purger.inProcessRequests.listUsersWithFailedRequest(), 0)
}

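// getNonDeletedIntervals returns the parts of originalInterval that are not covered by
// deletedInterval, assuming the two intervals overlap. For example, deleting [2, 3]
// from [0, 5] leaves [0, 1] and [4, 5].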
func getNonDeletedIntervals(originalInterval, deletedInterval model.Interval) []model.Interval {
	nonDeletedIntervals := []model.Interval{}
	if deletedInterval.Start > originalInterval.Start {
		nonDeletedIntervals = append(nonDeletedIntervals, model.Interval{Start: originalInterval.Start, End: deletedInterval.Start - 1})
	}

	if deletedInterval.End < originalInterval.End {
		nonDeletedIntervals = append(nonDeletedIntervals, model.Interval{Start: deletedInterval.End + 1, End: originalInterval.End})
	}

	return nonDeletedIntervals
}