github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/pkg/ingester/flush.go

package ingester

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/go-kit/log/level"
	ot "github.com/opentracing/opentracing-go"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/pkg/labels"
	"golang.org/x/time/rate"

	"github.com/cortexproject/cortex/pkg/chunk"
	"github.com/cortexproject/cortex/pkg/util"
	"github.com/cortexproject/cortex/pkg/util/log"
)

const (
	// Backoff for retrying 'immediate' flushes. Only counts for queue
	// position, not wallclock time.
	flushBackoff = 1 * time.Second
	// Lower bound on flushes per check period for rate-limiter
	minFlushes = 100
)

// Flush triggers a flush of all the chunks and closes the flush queues.
// Called from the Lifecycler as part of the ingester shutdown.
func (i *Ingester) Flush() {
	if i.cfg.BlocksStorageEnabled {
		i.v2LifecyclerFlush()
		return
	}

	level.Info(i.logger).Log("msg", "starting to flush all the chunks")
	i.sweepUsers(true)
	level.Info(i.logger).Log("msg", "chunks queued for flushing")

	// Close the flush queues, to unblock waiting workers.
	for _, flushQueue := range i.flushQueues {
		flushQueue.Close()
	}

	i.flushQueuesDone.Wait()
	level.Info(i.logger).Log("msg", "flushing of chunks complete")
}

// FlushHandler triggers a flush of all in-memory chunks. Mainly used for
// local testing.
func (i *Ingester) FlushHandler(w http.ResponseWriter, r *http.Request) {
	if i.cfg.BlocksStorageEnabled {
		i.v2FlushHandler(w, r)
		return
	}

	level.Info(i.logger).Log("msg", "starting to flush all the chunks")
	i.sweepUsers(true)
	level.Info(i.logger).Log("msg", "chunks queued for flushing")
	w.WriteHeader(http.StatusNoContent)
}

type flushOp struct {
	from      model.Time
	userID    string
	fp        model.Fingerprint
	immediate bool
}

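// Key identifies a flush operation by user, series fingerprint and flush mode,
// so ops that would flush the same series in the same way share a key (note
// that Enqueue reports whether the op was actually added to the queue).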
func (o *flushOp) Key() string {
	return fmt.Sprintf("%s-%d-%v", o.userID, o.fp, o.immediate)
}

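// Priority orders flush ops in the queue. Negating the first (unflushed)
// sample time means that, assuming the queue dequeues the highest priority
// first, series with the oldest data are flushed first.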
func (o *flushOp) Priority() int64 {
	return -int64(o.from)
}

// sweepUsers periodically schedules series for flushing and garbage collects users with no series
func (i *Ingester) sweepUsers(immediate bool) {
	if i.chunkStore == nil {
		return
	}

	oldest := model.Time(0)

	for id, state := range i.userStates.cp() {
		for pair := range state.fpToSeries.iter() {
			state.fpLocker.Lock(pair.fp)
			i.sweepSeries(id, pair.fp, pair.series, immediate)
			i.removeFlushedChunks(state, pair.fp, pair.series)
			first := pair.series.firstUnflushedChunkTime()
			state.fpLocker.Unlock(pair.fp)

			if first > 0 && (oldest == 0 || first < oldest) {
				oldest = first
			}
		}
	}

	i.metrics.oldestUnflushedChunkTimestamp.Set(float64(oldest.Unix()))
	i.setFlushRate()
}

// Compute a rate such that calls to the store are spread over nearly all of the flush period;
// for example, if we have 600 items in the queue and a 1-minute period, we will send 10.5 per second.
// Note that if the store can't keep up with this rate then it doesn't make any difference.
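// Worked example of the floor below: minFlushes = 100 per check period, so with
// a 1-minute period the limiter never drops below 100/60 ≈ 1.7 flushes per
// second, even when the queues are nearly empty.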
func (i *Ingester) setFlushRate() {
	totalQueueLength := 0
	for _, q := range i.flushQueues {
		totalQueueLength += q.Length()
	}
	const fudge = 1.05 // aim to finish a little bit before the end of the period
	flushesPerSecond := float64(totalQueueLength) / i.cfg.FlushCheckPeriod.Seconds() * fudge
	// Avoid going very slowly with tiny queues
	if flushesPerSecond*i.cfg.FlushCheckPeriod.Seconds() < minFlushes {
		flushesPerSecond = minFlushes / i.cfg.FlushCheckPeriod.Seconds()
	}
	level.Debug(i.logger).Log("msg", "computed flush rate", "rate", flushesPerSecond)
	i.flushRateLimiter.SetLimit(rate.Limit(flushesPerSecond))
}

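// flushReason records why a series was enqueued for flushing, and also doubles
// as the outcome of a dequeued flush op; its String() form is used as the label
// value for the seriesEnqueuedForFlush and seriesDequeuedOutcome metrics.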
type flushReason int8

const (
	noFlush = iota
	reasonImmediate
	reasonMultipleChunksInSeries
	reasonAged
	reasonIdle
	reasonStale
	reasonSpreadFlush
	// Following are flush outcomes
	noUser
	noSeries
	noChunks
	flushError
	reasonDropped
	maxFlushReason // Used for testing String() method. Should be last.
)

func (f flushReason) String() string {
	switch f {
	case noFlush:
		return "NoFlush"
	case reasonImmediate:
		return "Immediate"
	case reasonMultipleChunksInSeries:
		return "MultipleChunksInSeries"
	case reasonAged:
		return "Aged"
	case reasonIdle:
		return "Idle"
	case reasonStale:
		return "Stale"
	case reasonSpreadFlush:
		return "Spread"
	case noUser:
		return "NoUser"
	case noSeries:
		return "NoSeries"
	case noChunks:
		return "NoChunksToFlush"
	case flushError:
		return "FlushError"
	case reasonDropped:
		return "Dropped"
	default:
		panic("unrecognised flushReason")
	}
}

// sweepSeries schedules a series for flushing based on a set of criteria
//
// NB we don't close the head chunk here, as the series could wait in the queue
// for some time, and we want to encourage chunks to be as full as possible.
func (i *Ingester) sweepSeries(userID string, fp model.Fingerprint, series *memorySeries, immediate bool) {
	if len(series.chunkDescs) <= 0 {
		return
	}

	firstTime := series.firstTime()
	flush := i.shouldFlushSeries(series, fp, immediate)
	if flush == noFlush {
		return
	}

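	// Shard by fingerprint so a given series always lands on the same flush
	// queue (and therefore the same flushLoop worker).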
	flushQueueIndex := int(uint64(fp) % uint64(i.cfg.ConcurrentFlushes))
	if i.flushQueues[flushQueueIndex].Enqueue(&flushOp{firstTime, userID, fp, immediate}) {
		i.metrics.seriesEnqueuedForFlush.WithLabelValues(flush.String()).Inc()
		util.Event().Log("msg", "add to flush queue", "userID", userID, "reason", flush, "firstTime", firstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex)
	}
}

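// shouldFlushSeries returns the reason a series should be flushed, or noFlush.
// Immediate (shutdown) flushes only require an unflushed chunk; otherwise a
// series qualifies when it has multiple chunks with the first still unflushed,
// or when its only chunk is aged, idle or stale (see shouldFlushChunk).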
func (i *Ingester) shouldFlushSeries(series *memorySeries, fp model.Fingerprint, immediate bool) flushReason {
	if len(series.chunkDescs) == 0 {
		return noFlush
	}
	if immediate {
		for _, cd := range series.chunkDescs {
			if !cd.flushed {
				return reasonImmediate
			}
		}
		return noFlush // Everything is flushed.
	}

	// Flush if we have more than one chunk, and haven't already flushed the first chunk
	if len(series.chunkDescs) > 1 && !series.chunkDescs[0].flushed {
		if series.chunkDescs[0].flushReason != noFlush {
			return series.chunkDescs[0].flushReason
		}
		return reasonMultipleChunksInSeries
	}
	// Otherwise look in more detail at the first chunk
	return i.shouldFlushChunk(series.chunkDescs[0], fp, series.isStale())
}

func (i *Ingester) shouldFlushChunk(c *desc, fp model.Fingerprint, lastValueIsStale bool) flushReason {
	if c.flushed { // don't flush chunks we've already flushed
		return noFlush
	}

	// Adjust max age slightly to spread flushes out over time
	var jitter time.Duration
	if i.cfg.ChunkAgeJitter != 0 {
		jitter = time.Duration(fp) % i.cfg.ChunkAgeJitter
	}
	// Chunks should be flushed if they span longer than MaxChunkAge
	if c.LastTime.Sub(c.FirstTime) > (i.cfg.MaxChunkAge - jitter) {
		return reasonAged
	}

	// Chunks should be flushed if their last update is older than MaxChunkIdle.
	if model.Now().Sub(c.LastUpdate) > i.cfg.MaxChunkIdle {
		return reasonIdle
	}

	// A chunk whose last value is a stale marker can be flushed once it has been
	// idle for MaxStaleChunkIdle.
	if i.cfg.MaxStaleChunkIdle > 0 &&
		lastValueIsStale &&
		model.Now().Sub(c.LastUpdate) > i.cfg.MaxStaleChunkIdle {
		return reasonStale
	}

	return noFlush
}

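// flushLoop is the body of a flush worker: it drains flush queue j, pacing
// non-immediate flushes with the shared rate limiter, and exits once Dequeue
// returns nil (the queue has been closed).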
func (i *Ingester) flushLoop(j int) {
	defer func() {
		level.Debug(i.logger).Log("msg", "Ingester.flushLoop() exited")
		i.flushQueuesDone.Done()
	}()

	for {
		o := i.flushQueues[j].Dequeue()
		if o == nil {
			return
		}
		op := o.(*flushOp)

		if !op.immediate {
			_ = i.flushRateLimiter.Wait(context.Background())
		}
		outcome, err := i.flushUserSeries(j, op.userID, op.fp, op.immediate)
		i.metrics.seriesDequeuedOutcome.WithLabelValues(outcome.String()).Inc()
		if err != nil {
			level.Error(log.WithUserID(op.userID, i.logger)).Log("msg", "failed to flush user", "err", err)
		}

		// If we're exiting (immediate flush) and the flush failed, put the
		// operation back in the queue; adding flushBackoff only pushes it to a
		// later queue position, not a later wall-clock time.
		if op.immediate && err != nil {
			op.from = op.from.Add(flushBackoff)
			i.flushQueues[j].Enqueue(op)
		}
	}
}

// flushUserSeries returns the flush outcome: the original reason if the series
// was flushed, noFlush if it no longer needs flushing, or one of the error
// outcomes.
func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.Fingerprint, immediate bool) (flushReason, error) {
	i.metrics.flushSeriesInProgress.Inc()
	defer i.metrics.flushSeriesInProgress.Dec()

	if i.preFlushUserSeries != nil {
		i.preFlushUserSeries()
	}

	userState, ok := i.userStates.get(userID)
	if !ok {
		return noUser, nil
	}

	series, ok := userState.fpToSeries.get(fp)
	if !ok {
		return noSeries, nil
	}

	userState.fpLocker.Lock(fp)
	reason := i.shouldFlushSeries(series, fp, immediate)
	if reason == noFlush {
		userState.fpLocker.Unlock(fp)
		return noFlush, nil
	}

	// shouldFlushSeries() has told us we have at least one chunk.
	// Make a copy of the chunk descriptors slice, to avoid possible issues related to removing (and niling) elements from chunkDescs.
	// This can happen if the first chunk is already flushed -- removeFlushedChunks may set such a chunk to nil.
	// Since elements in the slice are pointers, we can still safely update chunk descriptors after the copy.
	chunks := append([]*desc(nil), series.chunkDescs...)
	if immediate {
		series.closeHead(reasonImmediate)
	} else if chunkReason := i.shouldFlushChunk(series.head(), fp, series.isStale()); chunkReason != noFlush {
		series.closeHead(chunkReason)
	} else {
		// The head chunk doesn't need flushing; step back by one.
		chunks = chunks[:len(chunks)-1]
	}

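	// For series flushed because they are idle or stale (and whose head chunk
	// is now closed), honour the per-user MinChunkLength limit: rather than
	// writing tiny chunks to the store, drop the series entirely and count the
	// chunks as dropped.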
	if (reason == reasonIdle || reason == reasonStale) && series.headChunkClosed {
		if minChunkLength := i.limits.MinChunkLength(userID); minChunkLength > 0 {
			chunkLength := 0
			for _, c := range chunks {
				chunkLength += c.C.Len()
			}
			if chunkLength < minChunkLength {
				userState.removeSeries(fp, series.metric)
				i.metrics.memoryChunks.Sub(float64(len(chunks)))
				i.metrics.droppedChunks.Add(float64(len(chunks)))
				util.Event().Log(
					"msg", "dropped chunks",
					"userID", userID,
					"numChunks", len(chunks),
					"chunkLength", chunkLength,
					"fp", fp,
					"series", series.metric,
					"queue", flushQueueIndex,
				)
				chunks = nil
				reason = reasonDropped
			}
		}
	}

	userState.fpLocker.Unlock(fp)

	if reason == reasonDropped {
		return reason, nil
	}

	// No need to flush these chunks again.
	for len(chunks) > 0 && chunks[0].flushed {
		chunks = chunks[1:]
	}

	if len(chunks) == 0 {
		return noChunks, nil
	}

	// flush the chunks without locking the series, as we don't want to hold the series lock for the duration of the dynamo/s3 rpcs.
	ctx, cancel := context.WithTimeout(context.Background(), i.cfg.FlushOpTimeout)
	defer cancel() // releases resources if slowOperation completes before timeout elapses

	sp, ctx := ot.StartSpanFromContext(ctx, "flushUserSeries")
	defer sp.Finish()
	sp.SetTag("organization", userID)

	util.Event().Log("msg", "flush chunks", "userID", userID, "reason", reason, "numChunks", len(chunks), "firstTime", chunks[0].FirstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex)
	err := i.flushChunks(ctx, userID, fp, series.metric, chunks)
	if err != nil {
		return flushError, err
	}

	userState.fpLocker.Lock(fp)
	for i := 0; i < len(chunks); i++ {
		// Mark the chunks as flushed, so we can remove them after the retention period.
		// We can safely use chunks[i] here, because elements are pointers to chunk descriptors.
		chunks[i].flushed = true
		chunks[i].LastUpdate = model.Now()
	}
	userState.fpLocker.Unlock(fp)
	return reason, err
}

// removeFlushedChunks drops leading chunks that have been flushed and retained
// for longer than RetainPeriod, and removes the series once no chunks are left.
// Must be called with the fpLocker lock held for fp.
func (i *Ingester) removeFlushedChunks(userState *userState, fp model.Fingerprint, series *memorySeries) {
	now := model.Now()
	for len(series.chunkDescs) > 0 {
		if series.chunkDescs[0].flushed && now.Sub(series.chunkDescs[0].LastUpdate) > i.cfg.RetainPeriod {
			series.chunkDescs[0] = nil // erase reference so the chunk can be garbage-collected
			series.chunkDescs = series.chunkDescs[1:]
			i.metrics.memoryChunks.Dec()
		} else {
			break
		}
	}
	if len(series.chunkDescs) == 0 {
		userState.removeSeries(fp, series.metric)
	}
}

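// flushChunks converts the chunk descriptors into wire chunks, encodes them,
// writes them to the chunk store in a single Put, and records per-chunk
// statistics only if the store write succeeds.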
func (i *Ingester) flushChunks(ctx context.Context, userID string, fp model.Fingerprint, metric labels.Labels, chunkDescs []*desc) error {
	if i.preFlushChunks != nil {
		i.preFlushChunks()
	}

	wireChunks := make([]chunk.Chunk, 0, len(chunkDescs))
	for _, chunkDesc := range chunkDescs {
		c := chunk.NewChunk(userID, fp, metric, chunkDesc.C, chunkDesc.FirstTime, chunkDesc.LastTime)
		if err := c.Encode(); err != nil {
			return err
		}
		wireChunks = append(wireChunks, c)
	}

	if err := i.chunkStore.Put(ctx, wireChunks); err != nil {
		return err
	}

	// Record statistics only when the actual put request did not return an error.
	for _, chunkDesc := range chunkDescs {
		utilization, length, size := chunkDesc.C.Utilization(), chunkDesc.C.Len(), chunkDesc.C.Size()
		util.Event().Log("msg", "chunk flushed", "userID", userID, "fp", fp, "series", metric, "nlabels", len(metric), "utilization", utilization, "length", length, "size", size, "firstTime", chunkDesc.FirstTime, "lastTime", chunkDesc.LastTime)
		i.metrics.chunkUtilization.Observe(utilization)
		i.metrics.chunkLength.Observe(float64(length))
		i.metrics.chunkSize.Observe(float64(size))
		i.metrics.chunkAge.Observe(model.Now().Sub(chunkDesc.FirstTime).Seconds())
	}

	return nil
}