github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/pkg/chunk/chunk_store.go (about)

     1  package chunk
     2  
     3  import (
     4  	"context"
     5  	"flag"
     6  	"fmt"
     7  	"sort"
     8  	"sync"
     9  	"time"
    10  
    11  	"github.com/go-kit/log"
    12  	"github.com/go-kit/log/level"
    13  	"github.com/pkg/errors"
    14  	"github.com/prometheus/client_golang/prometheus"
    15  	"github.com/prometheus/client_golang/prometheus/promauto"
    16  	"github.com/prometheus/common/model"
    17  	"github.com/prometheus/prometheus/pkg/labels"
    18  
    19  	"github.com/cortexproject/cortex/pkg/chunk/cache"
    20  	"github.com/cortexproject/cortex/pkg/chunk/encoding"
    21  	"github.com/cortexproject/cortex/pkg/util"
    22  	"github.com/cortexproject/cortex/pkg/util/extract"
    23  	util_log "github.com/cortexproject/cortex/pkg/util/log"
    24  	"github.com/cortexproject/cortex/pkg/util/spanlogger"
    25  	"github.com/cortexproject/cortex/pkg/util/validation"
    26  )
    27  
    28  var (
    29  	ErrQueryMustContainMetricName = QueryError("query must contain metric name")
    30  	ErrMetricNameLabelMissing     = errors.New("metric name label missing")
    31  	ErrParialDeleteChunkNoOverlap = errors.New("interval for partial deletion has not overlap with chunk interval")
    32  
    33  	indexEntriesPerChunk = promauto.NewHistogram(prometheus.HistogramOpts{
    34  		Namespace: "cortex",
    35  		Name:      "chunk_store_index_entries_per_chunk",
    36  		Help:      "Number of entries written to storage per chunk.",
    37  		Buckets:   prometheus.ExponentialBuckets(1, 2, 5),
    38  	})
    39  	cacheCorrupt = promauto.NewCounter(prometheus.CounterOpts{
    40  		Namespace: "cortex",
    41  		Name:      "cache_corrupt_chunks_total",
    42  		Help:      "Total count of corrupt chunks found in cache.",
    43  	})
    44  )
    45  
    46  // Query errors are to be treated as user errors, rather than storage errors.
    47  type QueryError string
    48  
    49  func (e QueryError) Error() string {
    50  	return string(e)
    51  }
    52  
// StoreConfig specifies config for a ChunkStore
type StoreConfig struct {
	// Cache for chunk data.
	ChunkCacheConfig cache.Config `yaml:"chunk_cache_config"`
	// Cache used to deduplicate index entry writes.
	WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config"`

	// Only index lookups for data older than this period are cached; 0 disables.
	CacheLookupsOlderThan model.Duration `yaml:"cache_lookups_older_than"`

	// Not visible in yaml because the setting shouldn't be common between ingesters and queriers.
	// This exists in case we don't want to cache all the chunks but still want to take advantage of
	// ingester chunk write deduplication. But for the queriers we need the full value. So when this option
	// is set, use different caches for ingesters and queriers.
	chunkCacheStubs bool // don't write the full chunk to cache, just a stub entry

	// When DisableIndexDeduplication is true and chunk is already there in cache, only index would be written to the store and not chunk.
	DisableIndexDeduplication bool `yaml:"-"`
}
    69  
// RegisterFlags adds the flags required to config this to the given FlagSet
func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) {
	// Chunk cache flags live under the "store.chunks-cache." prefix.
	cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("store.chunks-cache.", "Cache config for chunks. ", f)
	f.BoolVar(&cfg.chunkCacheStubs, "store.chunks-cache.cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.")
	// Write-dedupe cache flags live under the "store.index-cache-write." prefix.
	cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f)

	f.Var(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", "Cache index entries older than this period. 0 to disable.")
}
    78  
    79  // Validate validates the store config.
    80  func (cfg *StoreConfig) Validate(logger log.Logger) error {
    81  	if err := cfg.ChunkCacheConfig.Validate(); err != nil {
    82  		return err
    83  	}
    84  	if err := cfg.WriteDedupeCacheConfig.Validate(); err != nil {
    85  		return err
    86  	}
    87  	return nil
    88  }
    89  
// baseStore bundles the dependencies shared by the chunk store variants:
// the index client, the chunk storage client, the schema, per-tenant limits,
// and the cache-backed chunk fetcher.
type baseStore struct {
	cfg StoreConfig

	index   IndexClient // reads and writes index entries
	chunks  Client      // chunk object storage
	schema  BaseSchema  // maps queries to index lookups
	limits  StoreLimits // per-tenant query limits
	fetcher *Fetcher    // fetches chunk data, with caching
}
    99  
   100  func newBaseStore(cfg StoreConfig, schema BaseSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (baseStore, error) {
   101  	fetcher, err := NewChunkFetcher(chunksCache, cfg.chunkCacheStubs, chunks)
   102  	if err != nil {
   103  		return baseStore{}, err
   104  	}
   105  
   106  	return baseStore{
   107  		cfg:     cfg,
   108  		index:   index,
   109  		chunks:  chunks,
   110  		schema:  schema,
   111  		limits:  limits,
   112  		fetcher: fetcher,
   113  	}, nil
   114  }
   115  
// Stop any background goroutines (ie in the cache.)
func (c *baseStore) Stop() {
	// Stop the chunk storage backend, then the fetcher (and its cache),
	// then the index client.
	c.fetcher.storage.Stop()
	c.fetcher.Stop()
	c.index.Stop()
}
   122  
// store implements Store
type store struct {
	baseStore
	// schema shadows baseStore.schema with the more specific StoreSchema,
	// which can also produce write entries.
	schema StoreSchema
}
   128  
   129  func newStore(cfg StoreConfig, schema StoreSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (Store, error) {
   130  	rs, err := newBaseStore(cfg, schema, index, chunks, limits, chunksCache)
   131  	if err != nil {
   132  		return nil, err
   133  	}
   134  
   135  	return &store{
   136  		baseStore: rs,
   137  		schema:    schema,
   138  	}, nil
   139  }
   140  
   141  // Put implements Store
   142  func (c *store) Put(ctx context.Context, chunks []Chunk) error {
   143  	for _, chunk := range chunks {
   144  		if err := c.PutOne(ctx, chunk.From, chunk.Through, chunk); err != nil {
   145  			return err
   146  		}
   147  	}
   148  	return nil
   149  }
   150  
   151  // PutOne implements Store
   152  func (c *store) PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error {
   153  	log, ctx := spanlogger.New(ctx, "ChunkStore.PutOne")
   154  	defer log.Finish()
   155  	chunks := []Chunk{chunk}
   156  
   157  	err := c.fetcher.storage.PutChunks(ctx, chunks)
   158  	if err != nil {
   159  		return err
   160  	}
   161  
   162  	if cacheErr := c.fetcher.writeBackCache(ctx, chunks); cacheErr != nil {
   163  		level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr)
   164  	}
   165  
   166  	writeReqs, err := c.calculateIndexEntries(chunk.UserID, from, through, chunk)
   167  	if err != nil {
   168  		return err
   169  	}
   170  
   171  	return c.index.BatchWrite(ctx, writeReqs)
   172  }
   173  
   174  // calculateIndexEntries creates a set of batched WriteRequests for all the chunks it is given.
   175  func (c *store) calculateIndexEntries(userID string, from, through model.Time, chunk Chunk) (WriteBatch, error) {
   176  	seenIndexEntries := map[string]struct{}{}
   177  
   178  	metricName := chunk.Metric.Get(labels.MetricName)
   179  	if metricName == "" {
   180  		return nil, ErrMetricNameLabelMissing
   181  	}
   182  
   183  	entries, err := c.schema.GetWriteEntries(from, through, userID, metricName, chunk.Metric, chunk.ExternalKey())
   184  	if err != nil {
   185  		return nil, err
   186  	}
   187  	indexEntriesPerChunk.Observe(float64(len(entries)))
   188  
   189  	// Remove duplicate entries based on tableName:hashValue:rangeValue
   190  	result := c.index.NewWriteBatch()
   191  	for _, entry := range entries {
   192  		key := fmt.Sprintf("%s:%s:%x", entry.TableName, entry.HashValue, entry.RangeValue)
   193  		if _, ok := seenIndexEntries[key]; !ok {
   194  			seenIndexEntries[key] = struct{}{}
   195  			result.Add(entry.TableName, entry.HashValue, entry.RangeValue, entry.Value)
   196  		}
   197  	}
   198  	return result, nil
   199  }
   200  
   201  // Get implements Store
   202  func (c *store) Get(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error) {
   203  	log, ctx := spanlogger.New(ctx, "ChunkStore.Get")
   204  	defer log.Span.Finish()
   205  	level.Debug(log).Log("from", from, "through", through, "matchers", len(allMatchers))
   206  
   207  	// Validate the query is within reasonable bounds.
   208  	metricName, matchers, shortcut, err := c.validateQuery(ctx, userID, &from, &through, allMatchers)
   209  	if err != nil {
   210  		return nil, err
   211  	} else if shortcut {
   212  		return nil, nil
   213  	}
   214  
   215  	log.Span.SetTag("metric", metricName)
   216  	return c.getMetricNameChunks(ctx, userID, from, through, matchers, metricName)
   217  }
   218  
// GetChunkRefs is not supported by this store implementation; it always
// returns an error.
func (c *store) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) {
	return nil, nil, errors.New("not implemented")
}
   222  
   223  // LabelValuesForMetricName retrieves all label values for a single label name and metric name.
   224  func (c *baseStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName, labelName string) ([]string, error) {
   225  	log, ctx := spanlogger.New(ctx, "ChunkStore.LabelValues")
   226  	defer log.Span.Finish()
   227  	level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "labelName", labelName)
   228  
   229  	shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through)
   230  	if err != nil {
   231  		return nil, err
   232  	} else if shortcut {
   233  		return nil, nil
   234  	}
   235  
   236  	queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName)
   237  	if err != nil {
   238  		return nil, err
   239  	}
   240  
   241  	entries, err := c.lookupEntriesByQueries(ctx, queries)
   242  	if err != nil {
   243  		return nil, err
   244  	}
   245  
   246  	var result UniqueStrings
   247  	for _, entry := range entries {
   248  		_, labelValue, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value)
   249  		if err != nil {
   250  			return nil, err
   251  		}
   252  		result.Add(string(labelValue))
   253  	}
   254  	return result.Strings(), nil
   255  }
   256  
   257  // LabelNamesForMetricName retrieves all label names for a metric name.
   258  func (c *store) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) {
   259  	log, ctx := spanlogger.New(ctx, "ChunkStore.LabelNamesForMetricName")
   260  	defer log.Span.Finish()
   261  	level.Debug(log).Log("from", from, "through", through, "metricName", metricName)
   262  
   263  	shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through)
   264  	if err != nil {
   265  		return nil, err
   266  	} else if shortcut {
   267  		return nil, nil
   268  	}
   269  
   270  	chunks, err := c.lookupChunksByMetricName(ctx, userID, from, through, nil, metricName)
   271  	if err != nil {
   272  		return nil, err
   273  	}
   274  	level.Debug(log).Log("msg", "Chunks in index", "chunks", len(chunks))
   275  
   276  	// Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint
   277  	filtered := filterChunksByTime(from, through, chunks)
   278  	filtered, keys := filterChunksByUniqueFingerprint(filtered)
   279  	level.Debug(log).Log("msg", "Chunks post filtering", "chunks", len(chunks))
   280  
   281  	// Now fetch the actual chunk data from Memcache / S3
   282  	allChunks, err := c.fetcher.FetchChunks(ctx, filtered, keys)
   283  	if err != nil {
   284  		level.Error(log).Log("msg", "FetchChunks", "err", err)
   285  		return nil, err
   286  	}
   287  	return labelNamesFromChunks(allChunks), nil
   288  }
   289  
// validateQueryTimeRange checks that [from, through] is a valid interval for
// userID and clamps *through down to "now" when the query extends more than
// five minutes into the future. The returned bool is a shortcut flag: true
// means the whole range lies in the future and the caller should return an
// empty result set without querying storage.
func (c *baseStore) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) {
	//nolint:ineffassign,staticcheck //Leaving ctx even though we don't currently use it, we want to make it available for when we might need it and hopefully will ensure us using the correct context at that time
	log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange")
	defer log.Span.Finish()

	if *through < *from {
		return false, QueryError(fmt.Sprintf("invalid query, through < from (%s < %s)", through, from))
	}

	// Enforce the per-tenant maximum query length, if configured (>0).
	maxQueryLength := c.limits.MaxQueryLength(userID)
	if maxQueryLength > 0 && (*through).Sub(*from) > maxQueryLength {
		return false, QueryError(fmt.Sprintf(validation.ErrQueryTooLong, (*through).Sub(*from), maxQueryLength))
	}

	now := model.Now()

	if from.After(now) {
		// time-span start is in future ... regard as legal
		level.Info(log).Log("msg", "whole timerange in future, yield empty resultset", "through", through, "from", from, "now", now)
		return true, nil
	}

	if through.After(now.Add(5 * time.Minute)) {
		// time-span end is in future ... regard as legal
		level.Info(log).Log("msg", "adjusting end timerange from future to now", "old_through", through, "new_through", now)
		*through = now // Avoid processing future part - otherwise some schemas could fail with eg non-existent table gripes
	}

	return false, nil
}
   320  
   321  func (c *baseStore) validateQuery(ctx context.Context, userID string, from *model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) {
   322  	log, ctx := spanlogger.New(ctx, "store.validateQuery")
   323  	defer log.Span.Finish()
   324  
   325  	shortcut, err := c.validateQueryTimeRange(ctx, userID, from, through)
   326  	if err != nil {
   327  		return "", nil, false, err
   328  	}
   329  	if shortcut {
   330  		return "", nil, true, nil
   331  	}
   332  
   333  	// Check there is a metric name matcher of type equal,
   334  	metricNameMatcher, matchers, ok := extract.MetricNameMatcherFromMatchers(matchers)
   335  	if !ok || metricNameMatcher.Type != labels.MatchEqual {
   336  		return "", nil, false, ErrQueryMustContainMetricName
   337  	}
   338  
   339  	return metricNameMatcher.Value, matchers, false, nil
   340  }
   341  
   342  func (c *store) getMetricNameChunks(ctx context.Context, userID string, from, through model.Time, allMatchers []*labels.Matcher, metricName string) ([]Chunk, error) {
   343  	log, ctx := spanlogger.New(ctx, "ChunkStore.getMetricNameChunks")
   344  	defer log.Finish()
   345  	level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "matchers", len(allMatchers))
   346  
   347  	filters, matchers := util.SplitFiltersAndMatchers(allMatchers)
   348  	chunks, err := c.lookupChunksByMetricName(ctx, userID, from, through, matchers, metricName)
   349  	if err != nil {
   350  		return nil, err
   351  	}
   352  	level.Debug(log).Log("Chunks in index", len(chunks))
   353  
   354  	// Filter out chunks that are not in the selected time range.
   355  	filtered := filterChunksByTime(from, through, chunks)
   356  	level.Debug(log).Log("Chunks post filtering", len(chunks))
   357  
   358  	maxChunksPerQuery := c.limits.MaxChunksPerQueryFromStore(userID)
   359  	if maxChunksPerQuery > 0 && len(filtered) > maxChunksPerQuery {
   360  		err := QueryError(fmt.Sprintf("Query %v fetched too many chunks (%d > %d)", allMatchers, len(filtered), maxChunksPerQuery))
   361  		level.Error(log).Log("err", err)
   362  		return nil, err
   363  	}
   364  
   365  	// Now fetch the actual chunk data from Memcache / S3
   366  	keys := keysFromChunks(filtered)
   367  	allChunks, err := c.fetcher.FetchChunks(ctx, filtered, keys)
   368  	if err != nil {
   369  		return nil, err
   370  	}
   371  
   372  	// Filter out chunks based on the empty matchers in the query.
   373  	filteredChunks := filterChunksByMatchers(allChunks, filters)
   374  	return filteredChunks, nil
   375  }
   376  
// lookupChunksByMetricName resolves the chunks for a metric: directly from
// the index when there are no matchers, otherwise by intersecting the
// chunk-ID sets found for each matcher in parallel.
func (c *store) lookupChunksByMetricName(ctx context.Context, userID string, from, through model.Time, matchers []*labels.Matcher, metricName string) ([]Chunk, error) {
	log, ctx := spanlogger.New(ctx, "ChunkStore.lookupChunksByMetricName")
	defer log.Finish()

	// Just get chunks for metric if there are no matchers
	if len(matchers) == 0 {
		queries, err := c.schema.GetReadQueriesForMetric(from, through, userID, metricName)
		if err != nil {
			return nil, err
		}
		level.Debug(log).Log("queries", len(queries))

		entries, err := c.lookupEntriesByQueries(ctx, queries)
		if err != nil {
			return nil, err
		}
		level.Debug(log).Log("entries", len(entries))

		chunkIDs, err := c.parseIndexEntries(ctx, entries, nil)
		if err != nil {
			return nil, err
		}
		level.Debug(log).Log("chunkIDs", len(chunkIDs))

		return c.convertChunkIDsToChunks(ctx, userID, chunkIDs)
	}

	// Otherwise get chunks which include other matchers
	// One goroutine per matcher; each sends exactly one message on either
	// incomingChunkIDs or incomingErrors, so the len(matchers)-iteration
	// receive loop below drains everything and no goroutine is leaked.
	incomingChunkIDs := make(chan []string)
	incomingErrors := make(chan error)
	for _, matcher := range matchers {
		go func(matcher *labels.Matcher) {
			chunkIDs, err := c.lookupIdsByMetricNameMatcher(ctx, from, through, userID, metricName, matcher, nil)
			if err != nil {
				incomingErrors <- err
			} else {
				incomingChunkIDs <- chunkIDs
			}
		}(matcher)
	}

	// Receive chunkSets from all matchers
	var chunkIDs []string
	var lastErr error
	var initialized bool
	for i := 0; i < len(matchers); i++ {
		select {
		case incoming := <-incomingChunkIDs:
			if !initialized {
				chunkIDs = incoming
				initialized = true
			} else {
				// A chunk must satisfy every matcher, so intersect the sets.
				chunkIDs = intersectStrings(chunkIDs, incoming)
			}
		case err := <-incomingErrors:
			// Keep draining the channels; only the last error is reported.
			lastErr = err
		}
	}
	if lastErr != nil {
		return nil, lastErr
	}
	level.Debug(log).Log("msg", "post intersection", "chunkIDs", len(chunkIDs))

	// Convert IndexEntry's into chunks
	return c.convertChunkIDsToChunks(ctx, userID, chunkIDs)
}
   443  
// lookupIdsByMetricNameMatcher returns the chunk/series IDs matching a single
// matcher (or all IDs for the metric when matcher is nil). The optional
// filter callback can prune the generated index queries before execution.
func (c *baseStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, filter func([]IndexQuery) []IndexQuery) ([]string, error) {
	formattedMatcher := formatMatcher(matcher)
	log, ctx := spanlogger.New(ctx, "Store.lookupIdsByMetricNameMatcher", "metricName", metricName, "matcher", formattedMatcher)
	defer log.Span.Finish()

	// Choose the narrowest index read the schema supports for this matcher:
	// metric-only, exact label value, or all values of a label.
	var err error
	var queries []IndexQuery
	var labelName string
	if matcher == nil {
		queries, err = c.schema.GetReadQueriesForMetric(from, through, userID, metricName)
	} else if matcher.Type == labels.MatchEqual {
		labelName = matcher.Name
		queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, matcher.Value)
	} else {
		labelName = matcher.Name
		queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, matcher.Name)
	}
	if err != nil {
		return nil, err
	}
	level.Debug(log).Log("matcher", formattedMatcher, "queries", len(queries))

	if filter != nil {
		queries = filter(queries)
		level.Debug(log).Log("matcher", formattedMatcher, "filteredQueries", len(queries))
	}

	entries, err := c.lookupEntriesByQueries(ctx, queries)
	if e, ok := err.(CardinalityExceededError); ok {
		// Enrich the cardinality error with the metric/label context.
		e.MetricName = metricName
		e.LabelName = labelName
		return nil, e
	} else if err != nil {
		return nil, err
	}
	level.Debug(log).Log("matcher", formattedMatcher, "entries", len(entries))

	ids, err := c.parseIndexEntries(ctx, entries, matcher)
	if err != nil {
		return nil, err
	}
	level.Debug(log).Log("matcher", formattedMatcher, "ids", len(ids))

	return ids, nil
}
   489  
   490  // Using this function avoids logging of nil matcher, which works, but indirectly via panic and recover.
   491  // That confuses attached debugger, which wants to breakpoint on each panic.
   492  // Using simple check is also faster.
   493  func formatMatcher(matcher *labels.Matcher) string {
   494  	if matcher == nil {
   495  		return "nil"
   496  	}
   497  	return matcher.String()
   498  }
   499  
// lookupEntriesByQueries runs the given index queries and gathers all
// resulting entries. On error it logs, and returns whatever entries were
// collected alongside the error.
func (c *baseStore) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery) ([]IndexEntry, error) {
	log, ctx := spanlogger.New(ctx, "store.lookupEntriesByQueries")
	defer log.Span.Finish()

	// Nothing to do if there are no queries.
	if len(queries) == 0 {
		return nil, nil
	}

	// The entries slice is mutex-guarded because QueryPages may invoke the
	// callback from multiple goroutines (one per page/query).
	var lock sync.Mutex
	var entries []IndexEntry
	err := c.index.QueryPages(ctx, queries, func(query IndexQuery, resp ReadBatch) bool {
		iter := resp.Iterator()
		lock.Lock()
		for iter.Next() {
			entries = append(entries, IndexEntry{
				TableName:  query.TableName,
				HashValue:  query.HashValue,
				RangeValue: iter.RangeValue(),
				Value:      iter.Value(),
			})
		}
		lock.Unlock()
		// Always continue paging.
		return true
	})
	if err != nil {
		level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "error querying storage", "err", err)
	}
	return entries, err
}
   530  
   531  func (c *baseStore) parseIndexEntries(_ context.Context, entries []IndexEntry, matcher *labels.Matcher) ([]string, error) {
   532  	// Nothing to do if there are no entries.
   533  	if len(entries) == 0 {
   534  		return nil, nil
   535  	}
   536  
   537  	matchSet := map[string]struct{}{}
   538  	if matcher != nil && matcher.Type == labels.MatchRegexp {
   539  		set := FindSetMatches(matcher.Value)
   540  		for _, v := range set {
   541  			matchSet[v] = struct{}{}
   542  		}
   543  	}
   544  
   545  	result := make([]string, 0, len(entries))
   546  	for _, entry := range entries {
   547  		chunkKey, labelValue, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value)
   548  		if err != nil {
   549  			return nil, err
   550  		}
   551  
   552  		// If the matcher is like a set (=~"a|b|c|d|...") and
   553  		// the label value is not in that set move on.
   554  		if len(matchSet) > 0 {
   555  			if _, ok := matchSet[string(labelValue)]; !ok {
   556  				continue
   557  			}
   558  
   559  			// If its in the set, then add it to set, we don't need to run
   560  			// matcher on it again.
   561  			result = append(result, chunkKey)
   562  			continue
   563  		}
   564  
   565  		if matcher != nil && !matcher.Matches(string(labelValue)) {
   566  			continue
   567  		}
   568  		result = append(result, chunkKey)
   569  	}
   570  	// Return ids sorted and deduped because they will be merged with other sets.
   571  	sort.Strings(result)
   572  	result = uniqueStrings(result)
   573  	return result, nil
   574  }
   575  
   576  func (c *baseStore) convertChunkIDsToChunks(ctx context.Context, userID string, chunkIDs []string) ([]Chunk, error) {
   577  	chunkSet := make([]Chunk, 0, len(chunkIDs))
   578  	for _, chunkID := range chunkIDs {
   579  		chunk, err := ParseExternalKey(userID, chunkID)
   580  		if err != nil {
   581  			return nil, err
   582  		}
   583  		chunkSet = append(chunkSet, chunk)
   584  	}
   585  
   586  	return chunkSet, nil
   587  }
   588  
   589  func (c *store) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error {
   590  	metricName := metric.Get(model.MetricNameLabel)
   591  	if metricName == "" {
   592  		return ErrMetricNameLabelMissing
   593  	}
   594  
   595  	chunkWriteEntries, err := c.schema.GetWriteEntries(from, through, userID, string(metricName), metric, chunkID)
   596  	if err != nil {
   597  		return errors.Wrapf(err, "when getting index entries to delete for chunkID=%s", chunkID)
   598  	}
   599  
   600  	return c.deleteChunk(ctx, userID, chunkID, metric, chunkWriteEntries, partiallyDeletedInterval, func(chunk Chunk) error {
   601  		return c.PutOne(ctx, chunk.From, chunk.Through, chunk)
   602  	})
   603  }
   604  
// deleteChunk removes a chunk and its index entries. If
// partiallyDeletedInterval is non-nil the chunk is first "rebounded": the
// portions outside the interval are sliced off, re-encoded and written back
// via putChunkFunc, and only then are the original chunk and its index
// entries deleted.
func (c *baseStore) deleteChunk(ctx context.Context,
	userID string,
	chunkID string,
	metric labels.Labels,
	chunkWriteEntries []IndexEntry,
	partiallyDeletedInterval *model.Interval,
	putChunkFunc func(chunk Chunk) error) error {

	metricName := metric.Get(model.MetricNameLabel)
	if metricName == "" {
		return ErrMetricNameLabelMissing
	}

	// if chunk is partially deleted, fetch it, slice non-deleted portion and put it to store before deleting original chunk
	if partiallyDeletedInterval != nil {
		err := c.reboundChunk(ctx, userID, chunkID, *partiallyDeletedInterval, putChunkFunc)
		if err != nil {
			return errors.Wrapf(err, "chunkID=%s", chunkID)
		}
	}

	// Delete the index entries first, then the chunk object itself.
	batch := c.index.NewWriteBatch()
	for i := range chunkWriteEntries {
		batch.Delete(chunkWriteEntries[i].TableName, chunkWriteEntries[i].HashValue, chunkWriteEntries[i].RangeValue)
	}

	err := c.index.BatchWrite(ctx, batch)
	if err != nil {
		return errors.Wrapf(err, "when deleting index entries for chunkID=%s", chunkID)
	}

	err = c.chunks.DeleteChunk(ctx, userID, chunkID)
	if err != nil {
		// A missing object means the chunk is already gone; treat as success.
		if err == ErrStorageObjectNotFound {
			return nil
		}
		return errors.Wrapf(err, "when deleting chunk from storage with chunkID=%s", chunkID)
	}

	return nil
}
   646  
// reboundChunk handles partial deletion of a chunk: it fetches the chunk,
// slices off the sub-ranges that fall outside partiallyDeletedInterval
// (before its start and after its end), and writes those slices back as new
// chunks via putChunkFunc. The original chunk is NOT deleted here; the
// caller is expected to do that afterwards.
func (c *baseStore) reboundChunk(ctx context.Context, userID, chunkID string, partiallyDeletedInterval model.Interval, putChunkFunc func(chunk Chunk) error) error {
	chunk, err := ParseExternalKey(userID, chunkID)
	if err != nil {
		return errors.Wrap(err, "when parsing external key")
	}

	// The deletion interval must intersect the chunk's own time range.
	if !intervalsOverlap(model.Interval{Start: chunk.From, End: chunk.Through}, partiallyDeletedInterval) {
		return ErrParialDeleteChunkNoOverlap
	}

	chunks, err := c.fetcher.FetchChunks(ctx, []Chunk{chunk}, []string{chunkID})
	if err != nil {
		// Chunk already gone from storage; nothing to rebound.
		if err == ErrStorageObjectNotFound {
			return nil
		}
		return errors.Wrap(err, "when fetching chunk from storage for slicing")
	}

	if len(chunks) != 1 {
		return fmt.Errorf("expected to get 1 chunk from storage got %d instead", len(chunks))
	}

	chunk = chunks[0]
	var newChunks []*Chunk
	// Keep the portion before the deleted interval, if any.
	if partiallyDeletedInterval.Start > chunk.From {
		newChunk, err := chunk.Slice(chunk.From, partiallyDeletedInterval.Start-1)
		if err != nil && err != encoding.ErrSliceNoDataInRange {
			return errors.Wrapf(err, "when slicing chunk for interval %d - %d", chunk.From, partiallyDeletedInterval.Start-1)
		}

		if newChunk != nil {
			newChunks = append(newChunks, newChunk)
		}
	}

	// Keep the portion after the deleted interval, if any.
	if partiallyDeletedInterval.End < chunk.Through {
		newChunk, err := chunk.Slice(partiallyDeletedInterval.End+1, chunk.Through)
		if err != nil && err != encoding.ErrSliceNoDataInRange {
			return errors.Wrapf(err, "when slicing chunk for interval %d - %d", partiallyDeletedInterval.End+1, chunk.Through)
		}

		if newChunk != nil {
			newChunks = append(newChunks, newChunk)
		}
	}

	// Encode and store each retained slice as a new chunk.
	for _, newChunk := range newChunks {
		if err := newChunk.Encode(); err != nil {
			return errors.Wrapf(err, "when encoding new chunk formed after slicing for interval %d - %d", newChunk.From, newChunk.Through)
		}

		err = putChunkFunc(*newChunk)
		if err != nil {
			return errors.Wrapf(err, "when putting new chunk formed after slicing for interval %d - %d", newChunk.From, newChunk.Through)
		}
	}

	return nil
}
   706  
// DeleteSeriesIDs implements Store as a no-op for the plain chunk store.
func (c *store) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error {
	// SeriesID is something which is only used in SeriesStore so we need not do anything here
	return nil
}
   711  
// GetChunkFetcher returns the store's chunk fetcher; the time argument is
// ignored because baseStore uses a single fetcher for all periods.
func (c *baseStore) GetChunkFetcher(_ model.Time) *Fetcher {
	return c.fetcher
}