github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/ingester/instance.go

     1  package ingester
     2  
     3  import (
     4  	"context"
     5  	"net/http"
     6  	"os"
     7  	"sync"
     8  	"syscall"
     9  	"time"
    10  
    11  	"github.com/go-kit/log/level"
    12  	"github.com/pkg/errors"
    13  	"github.com/prometheus/client_golang/prometheus"
    14  	"github.com/prometheus/client_golang/prometheus/promauto"
    15  	"github.com/prometheus/common/model"
    16  	"github.com/prometheus/prometheus/model/labels"
    17  	"github.com/prometheus/prometheus/tsdb/chunks"
    18  	tsdb_record "github.com/prometheus/prometheus/tsdb/record"
    19  	"github.com/weaveworks/common/httpgrpc"
    20  	"go.uber.org/atomic"
    21  
    22  	"github.com/grafana/loki/pkg/ingester/index"
    23  	"github.com/grafana/loki/pkg/iter"
    24  	"github.com/grafana/loki/pkg/logproto"
    25  	"github.com/grafana/loki/pkg/logql"
    26  	"github.com/grafana/loki/pkg/logql/syntax"
    27  	"github.com/grafana/loki/pkg/logqlmodel/stats"
    28  	"github.com/grafana/loki/pkg/querier/astmapper"
    29  	"github.com/grafana/loki/pkg/runtime"
    30  	"github.com/grafana/loki/pkg/storage/chunk"
    31  	"github.com/grafana/loki/pkg/storage/config"
    32  	"github.com/grafana/loki/pkg/usagestats"
    33  	"github.com/grafana/loki/pkg/util"
    34  	"github.com/grafana/loki/pkg/util/deletion"
    35  	util_log "github.com/grafana/loki/pkg/util/log"
    36  	"github.com/grafana/loki/pkg/util/math"
    37  	"github.com/grafana/loki/pkg/validation"
    38  )
    39  
    40  const (
    41  	queryBatchSize       = 128
    42  	queryBatchSampleSize = 512
    43  )
    44  
    45  var (
    46  	memoryStreams = promauto.NewGaugeVec(prometheus.GaugeOpts{
    47  		Namespace: "loki",
    48  		Name:      "ingester_memory_streams",
    49  		Help:      "The total number of streams in memory per tenant.",
    50  	}, []string{"tenant"})
    51  	streamsCreatedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
    52  		Namespace: "loki",
    53  		Name:      "ingester_streams_created_total",
    54  		Help:      "The total number of streams created per tenant.",
    55  	}, []string{"tenant"})
    56  	streamsRemovedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
    57  		Namespace: "loki",
    58  		Name:      "ingester_streams_removed_total",
    59  		Help:      "The total number of streams removed per tenant.",
    60  	}, []string{"tenant"})
    61  
    62  	streamsCountStats = usagestats.NewInt("ingester_streams_count")
    63  )
    64  
    65  type instance struct {
    66  	cfg *Config
    67  
    68  	buf     []byte // buffer used to compute fingerprints.
    69  	streams *streamsMap
    70  
    71  	index  *index.Multi
    72  	mapper *fpMapper // the mapper no longer needs a mutex because reads from streams are lock-free
    73  
    74  	instanceID string
    75  
    76  	streamsCreatedTotal prometheus.Counter
    77  	streamsRemovedTotal prometheus.Counter
    78  
    79  	tailers   map[uint32]*tailer
    80  	tailerMtx sync.RWMutex
    81  
    82  	limiter *Limiter
    83  	configs *runtime.TenantConfigs
    84  
    85  	wal WAL
    86  
    87  	// Denotes whether the ingester should flush on shutdown.
    88  	// Currently only used by the WAL to signal when the disk is full.
    89  	flushOnShutdownSwitch *OnceSwitch
    90  
    91  	metrics *ingesterMetrics
    92  
    93  	chunkFilter chunk.RequestChunkFilterer
    94  }
    95  
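        // newInstance builds the in-memory state for a single tenant: a sharded inverted index
        // derived from the period configs, an empty streams map, per-tenant stream metrics, and a
        // fingerprint mapper backed by getLabelsFromFingerprint.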
    96  func newInstance(
    97  	cfg *Config,
    98  	periodConfigs []config.PeriodConfig,
    99  	instanceID string,
   100  	limiter *Limiter,
   101  	configs *runtime.TenantConfigs,
   102  	wal WAL,
   103  	metrics *ingesterMetrics,
   104  	flushOnShutdownSwitch *OnceSwitch,
   105  	chunkFilter chunk.RequestChunkFilterer,
   106  ) (*instance, error) {
   107  	invertedIndex, err := index.NewMultiInvertedIndex(periodConfigs, uint32(cfg.IndexShards))
   108  	if err != nil {
   109  		return nil, err
   110  	}
   111  	i := &instance{
   112  		cfg:        cfg,
   113  		streams:    newStreamsMap(),
   114  		buf:        make([]byte, 0, 1024),
   115  		index:      invertedIndex,
   116  		instanceID: instanceID,
   117  
   118  		streamsCreatedTotal: streamsCreatedTotal.WithLabelValues(instanceID),
   119  		streamsRemovedTotal: streamsRemovedTotal.WithLabelValues(instanceID),
   120  
   121  		tailers: map[uint32]*tailer{},
   122  		limiter: limiter,
   123  		configs: configs,
   124  
   125  		wal:                   wal,
   126  		metrics:               metrics,
   127  		flushOnShutdownSwitch: flushOnShutdownSwitch,
   128  
   129  		chunkFilter: chunkFilter,
   130  	}
   131  	i.mapper = newFPMapper(i.getLabelsFromFingerprint)
   132  	return i, err
   133  }
   134  
   135  // consumeChunk manually adds a chunk that was received during ingester chunk
   136  // transfer.
   137  func (i *instance) consumeChunk(ctx context.Context, ls labels.Labels, chunk *logproto.Chunk) error {
   138  	fp := i.getHashForLabels(ls)
   139  
   140  	s, _, _ := i.streams.LoadOrStoreNewByFP(fp,
   141  		func() (*stream, error) {
   142  			s := i.createStreamByFP(ls, fp)
   143  			s.chunkMtx.Lock()
   144  			return s, nil
   145  		},
   146  		func(s *stream) error {
   147  			s.chunkMtx.Lock()
   148  			return nil
   149  		},
   150  	)
   151  	defer s.chunkMtx.Unlock()
   152  
   153  	err := s.consumeChunk(ctx, chunk)
   154  	if err == nil {
   155  		i.metrics.memoryChunks.Inc()
   156  	}
   157  
   158  	return err
   159  }
   160  
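        // Push appends the entries of each stream in the request to the corresponding in-memory
        // stream, creating streams that don't exist yet, and accumulates the writes into a single
        // WAL record that is logged once all streams have been processed. A WAL write that fails
        // because the disk is full flips the flush-on-shutdown switch instead of failing the push;
        // the last per-stream append error, if any, is returned to the caller.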
   161  func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
   162  	record := recordPool.GetRecord()
   163  	record.UserID = i.instanceID
   164  	defer recordPool.PutRecord(record)
   165  
   166  	var appendErr error
   167  	for _, reqStream := range req.Streams {
   168  
   169  		s, _, err := i.streams.LoadOrStoreNew(reqStream.Labels,
   170  			func() (*stream, error) {
   171  				s, err := i.createStream(reqStream, record)
   172  				// Lock before adding to maps
   173  				if err == nil {
   174  					s.chunkMtx.Lock()
   175  				}
   176  				return s, err
   177  			},
   178  			func(s *stream) error {
   179  				s.chunkMtx.Lock()
   180  				return nil
   181  			},
   182  		)
   183  		if err != nil {
   184  			appendErr = err
   185  			continue
   186  		}
   187  
   188  		_, err = s.Push(ctx, reqStream.Entries, record, 0, false)
   189  		if err != nil {
   190  			appendErr = err
   191  		}
   192  		s.chunkMtx.Unlock()
   193  	}
   194  
   195  	if !record.IsEmpty() {
   196  		if err := i.wal.Log(record); err != nil {
   197  			if e, ok := err.(*os.PathError); ok && e.Err == syscall.ENOSPC {
   198  				i.metrics.walDiskFullFailures.Inc()
   199  				i.flushOnShutdownSwitch.TriggerAnd(func() {
   200  					level.Error(util_log.Logger).Log(
   201  						"msg",
   202  						"Error writing to WAL, disk full, no further messages will be logged for this error",
   203  					)
   204  				})
   205  			} else {
   206  				return err
   207  			}
   208  		}
   209  	}
   210  
   211  	return appendErr
   212  }
   213  
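        // createStream registers a new stream for the given push request stream. Unless the WAL is
        // being replayed (record == nil), the per-tenant stream limit is enforced first and a
        // StatusTooManyRequests error is returned when it is exceeded. The labels are parsed, the
        // stream is added to the inverted index, its series is appended to the WAL record (or the
        // recovered-streams counter is bumped during replay), and any matching tailers are attached.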
   214  func (i *instance) createStream(pushReqStream logproto.Stream, record *WALRecord) (*stream, error) {
   215  	// record is only nil when replaying the WAL. We don't want to drop data when replaying a WAL after
   216  	// reducing the stream limits, for instance.
   217  	var err error
   218  	if record != nil {
   219  		err = i.limiter.AssertMaxStreamsPerUser(i.instanceID, i.streams.Len())
   220  	}
   221  
   222  	if err != nil {
   223  		if i.configs.LogStreamCreation(i.instanceID) {
   224  			level.Debug(util_log.Logger).Log(
   225  				"msg", "failed to create stream, exceeded limit",
   226  				"org_id", i.instanceID,
   227  				"err", err,
   228  				"stream", pushReqStream.Labels,
   229  			)
   230  		}
   231  
   232  		validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(len(pushReqStream.Entries)))
   233  		bytes := 0
   234  		for _, e := range pushReqStream.Entries {
   235  			bytes += len(e.Line)
   236  		}
   237  		validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(bytes))
   238  		return nil, httpgrpc.Errorf(http.StatusTooManyRequests, validation.StreamLimitErrorMsg)
   239  	}
   240  
   241  	labels, err := syntax.ParseLabels(pushReqStream.Labels)
   242  	if err != nil {
   243  		if i.configs.LogStreamCreation(i.instanceID) {
   244  			level.Debug(util_log.Logger).Log(
   245  				"msg", "failed to create stream, failed to parse labels",
   246  				"org_id", i.instanceID,
   247  				"err", err,
   248  				"stream", pushReqStream.Labels,
   249  			)
   250  		}
   251  		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
   252  	}
   253  	fp := i.getHashForLabels(labels)
   254  
   255  	sortedLabels := i.index.Add(logproto.FromLabelsToLabelAdapters(labels), fp)
   256  	s := newStream(i.cfg, i.limiter, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.metrics)
   257  
   258  	// record will be nil when replaying the WAL (we don't want to rewrite WAL entries as we replay them).
   259  	if record != nil {
   260  		record.Series = append(record.Series, tsdb_record.RefSeries{
   261  			Ref:    chunks.HeadSeriesRef(fp),
   262  			Labels: sortedLabels,
   263  		})
   264  	} else {
   265  		// If the record is nil, this is a WAL recovery.
   266  		i.metrics.recoveredStreamsTotal.Inc()
   267  	}
   268  
   269  	memoryStreams.WithLabelValues(i.instanceID).Inc()
   270  	i.streamsCreatedTotal.Inc()
   271  	i.addTailersToNewStream(s)
   272  	streamsCountStats.Add(1)
   273  
   274  	if i.configs.LogStreamCreation(i.instanceID) {
   275  		level.Debug(util_log.Logger).Log(
   276  			"msg", "successfully created stream",
   277  			"org_id", i.instanceID,
   278  			"stream", pushReqStream.Labels,
   279  		)
   280  	}
   281  
   282  	return s, nil
   283  }
   284  
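        // createStreamByFP builds and indexes a stream for an already-computed fingerprint; the
        // caller is responsible for storing it in the streams map (see consumeChunk).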
   285  func (i *instance) createStreamByFP(ls labels.Labels, fp model.Fingerprint) *stream {
   286  	sortedLabels := i.index.Add(logproto.FromLabelsToLabelAdapters(ls), fp)
   287  	s := newStream(i.cfg, i.limiter, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.metrics)
   288  
   289  	i.streamsCreatedTotal.Inc()
   290  	memoryStreams.WithLabelValues(i.instanceID).Inc()
   291  	i.addTailersToNewStream(s)
   292  
   293  	return s
   294  }
   295  
   296  // getOrCreateStream returns the stream or creates it.
   297  // It's safe to use this function when the returned stream does not need to stay consistent with the streamsMap (e.g. in the ingesterRecoverer);
   298  // otherwise use streamsMap.LoadOrStoreNew and lock the stream's chunkMtx inside the callbacks.
   299  func (i *instance) getOrCreateStream(pushReqStream logproto.Stream, record *WALRecord) (*stream, error) {
   300  	s, _, err := i.streams.LoadOrStoreNew(pushReqStream.Labels, func() (*stream, error) {
   301  		return i.createStream(pushReqStream, record)
   302  	}, nil)
   303  
   304  	return s, err
   305  }
   306  
   307  // removeStream removes a stream from the instance.
   308  func (i *instance) removeStream(s *stream) {
   309  	if i.streams.Delete(s) {
   310  		i.index.Delete(s.labels, s.fp)
   311  		i.streamsRemovedTotal.Inc()
   312  		memoryStreams.WithLabelValues(i.instanceID).Dec()
   313  		streamsCountStats.Add(-1)
   314  	}
   315  }
   316  
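        // getHashForLabels hashes the label set into a fingerprint, reusing i.buf, and runs it
        // through the fpMapper to resolve hash collisions between different label sets.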
   317  func (i *instance) getHashForLabels(ls labels.Labels) model.Fingerprint {
   318  	var fp uint64
   319  	fp, i.buf = ls.HashWithoutLabels(i.buf, []string(nil)...)
   320  	return i.mapper.mapFP(model.Fingerprint(fp), ls)
   321  }
   322  
   323  // getLabelsFromFingerprint returns the labels associated with the given fingerprint. It is used by the fingerprint mapper.
   324  func (i *instance) getLabelsFromFingerprint(fp model.Fingerprint) labels.Labels {
   325  	s, ok := i.streams.LoadByFP(fp)
   326  	if !ok {
   327  		return nil
   328  	}
   329  	return s.labels
   330  }
   331  
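        // Query returns an iterator over the entries of all in-memory streams matching the request's
        // log selector (and optional shard), with the selector's pipeline and any delete filters
        // applied, merged and sorted in the requested direction.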
   332  func (i *instance) Query(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) {
   333  	expr, err := req.LogSelector()
   334  	if err != nil {
   335  		return nil, err
   336  	}
   337  
   338  	pipeline, err := expr.Pipeline()
   339  	if err != nil {
   340  		return nil, err
   341  	}
   342  
   343  	pipeline, err = deletion.SetupPipeline(req, pipeline)
   344  	if err != nil {
   345  		return nil, err
   346  	}
   347  
   348  	stats := stats.FromContext(ctx)
   349  	var iters []iter.EntryIterator
   350  
   351  	shard, err := parseShardFromRequest(req.Shards)
   352  	if err != nil {
   353  		return nil, err
   354  	}
   355  
   356  	err = i.forMatchingStreams(
   357  		ctx,
   358  		req.Start,
   359  		expr.Matchers(),
   360  		shard,
   361  		func(stream *stream) error {
   362  			iter, err := stream.Iterator(ctx, stats, req.Start, req.End, req.Direction, pipeline.ForStream(stream.labels))
   363  			if err != nil {
   364  				return err
   365  			}
   366  			iters = append(iters, iter)
   367  			return nil
   368  		},
   369  	)
   370  	if err != nil {
   371  		return nil, err
   372  	}
   373  
   374  	return iter.NewSortEntryIterator(iters, req.Direction), nil
   375  }
   376  
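        // QuerySample returns a sample iterator for the request's metric query over all matching
        // in-memory streams, applying the expression's extractor and any delete filters.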
   377  func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) {
   378  	expr, err := req.Expr()
   379  	if err != nil {
   380  		return nil, err
   381  	}
   382  
   383  	extractor, err := expr.Extractor()
   384  	if err != nil {
   385  		return nil, err
   386  	}
   387  
   388  	extractor, err = deletion.SetupExtractor(req, extractor)
   389  	if err != nil {
   390  		return nil, err
   391  	}
   392  
   393  	stats := stats.FromContext(ctx)
   394  	var iters []iter.SampleIterator
   395  
   396  	shard, err := parseShardFromRequest(req.Shards)
   397  	if err != nil {
   398  		return nil, err
   399  	}
   407  
   408  	err = i.forMatchingStreams(
   409  		ctx,
   410  		req.Start,
   411  		expr.Selector().Matchers(),
   412  		shard,
   413  		func(stream *stream) error {
   414  			iter, err := stream.SampleIterator(ctx, stats, req.Start, req.End, extractor.ForStream(stream.labels))
   415  			if err != nil {
   416  				return err
   417  			}
   418  			iters = append(iters, iter)
   419  			return nil
   420  		},
   421  	)
   422  	if err != nil {
   423  		return nil, err
   424  	}
   425  
   426  	return iter.NewSortSampleIterator(iters), nil
   427  }
   428  
   429  // Label returns the label names or values, depending on the given request.
   430  // Without label matchers, the label names and values are retrieved from the index directly.
   431  // If label matchers are given, only the matching streams are fetched from the index,
   432  // and the label names or values are then retrieved from those streams.
   433  func (i *instance) Label(ctx context.Context, req *logproto.LabelRequest, matchers ...*labels.Matcher) (*logproto.LabelResponse, error) {
   434  	if len(matchers) == 0 {
   435  		var labels []string
   436  		if req.Values {
   437  			values, err := i.index.LabelValues(*req.Start, req.Name, nil)
   438  			if err != nil {
   439  				return nil, err
   440  			}
   441  			labels = append(labels, values...)
   445  			return &logproto.LabelResponse{
   446  				Values: labels,
   447  			}, nil
   448  		}
   449  		names, err := i.index.LabelNames(*req.Start, nil)
   450  		if err != nil {
   451  			return nil, err
   452  		}
   453  		labels = append(labels, names...)
   457  		return &logproto.LabelResponse{
   458  			Values: labels,
   459  		}, nil
   460  	}
   461  
   462  	labels := make([]string, 0)
   463  	err := i.forMatchingStreams(ctx, *req.Start, matchers, nil, func(s *stream) error {
   464  		for _, label := range s.labels {
   465  			if req.Values && label.Name == req.Name {
   466  				labels = append(labels, label.Value)
   467  				continue
   468  			}
   469  			if !req.Values {
   470  				labels = append(labels, label.Name)
   471  			}
   472  		}
   473  		return nil
   474  	})
   475  	if err != nil {
   476  		return nil, err
   477  	}
   478  
   479  	return &logproto.LabelResponse{
   480  		Values: labels,
   481  	}, nil
   482  }
   483  
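        // Series returns the unique series identifiers of in-memory streams that match any of the
        // requested matcher groups (or all streams when no groups are given), restricted to streams
        // overlapping the request time range and the optional shard.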
   484  func (i *instance) Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) {
   485  	groups, err := logql.Match(req.GetGroups())
   486  	if err != nil {
   487  		return nil, err
   488  	}
   489  	shard, err := parseShardFromRequest(req.Shards)
   490  	if err != nil {
   491  		return nil, err
   492  	}
   493  
   494  	var series []logproto.SeriesIdentifier
   495  
   496  	// If no matchers were supplied we include all streams.
   497  	if len(groups) == 0 {
   498  		series = make([]logproto.SeriesIdentifier, 0, i.streams.Len())
   499  		err = i.forMatchingStreams(ctx, req.Start, nil, shard, func(stream *stream) error {
   500  			// consider the stream only if it overlaps the request time range
   501  			if shouldConsiderStream(stream, req.Start, req.End) {
   502  				series = append(series, logproto.SeriesIdentifier{
   503  					Labels: stream.labels.Map(),
   504  				})
   505  			}
   506  			return nil
   507  		})
   508  		if err != nil {
   509  			return nil, err
   510  		}
   511  	} else {
   512  		dedupedSeries := make(map[uint64]logproto.SeriesIdentifier)
   513  		for _, matchers := range groups {
   514  			err = i.forMatchingStreams(ctx, req.Start, matchers, shard, func(stream *stream) error {
   515  				// consider the stream only if it overlaps the request time range
   516  				if shouldConsiderStream(stream, req.Start, req.End) {
   517  					// exit early when this stream was added by an earlier group
   518  					key := stream.labels.Hash()
   519  					if _, found := dedupedSeries[key]; found {
   520  						return nil
   521  					}
   522  
   523  					dedupedSeries[key] = logproto.SeriesIdentifier{
   524  						Labels: stream.labels.Map(),
   525  					}
   526  				}
   527  				return nil
   528  			})
   529  			if err != nil {
   530  				return nil, err
   531  			}
   532  		}
   533  		series = make([]logproto.SeriesIdentifier, 0, len(dedupedSeries))
   534  		for _, v := range dedupedSeries {
   535  			series = append(series, v)
   536  		}
   537  	}
   538  
   539  	return &logproto.SeriesResponse{Series: series}, nil
   540  }
   541  
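        // GetStats returns index statistics for the matching in-memory streams: the number of
        // streams plus chunk, entry, and byte counts for chunks that overlap the request range and
        // have not been flushed yet (flushed chunks are counted by the TSDB manager and shipper).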
   542  func (i *instance) GetStats(ctx context.Context, req *logproto.IndexStatsRequest) (*logproto.IndexStatsResponse, error) {
   543  	matchers, err := syntax.ParseMatchers(req.Matchers)
   544  	if err != nil {
   545  		return nil, err
   546  	}
   547  
   548  	res := &logproto.IndexStatsResponse{}
   549  	from, through := req.From.Time(), req.Through.Time()
   550  
   551  	if err = i.forMatchingStreams(ctx, from, matchers, nil, func(s *stream) error {
   552  		// checks for equality against chunk flush fields
   553  		// zeroValueTime is compared against chunk flush timestamps to detect chunks that have not been flushed yet
   554  
   555  		// Consider streams which overlap our time range
   556  		if shouldConsiderStream(s, from, through) {
   557  			s.chunkMtx.RLock()
   558  			res.Streams++
   559  			for _, chk := range s.chunks {
   560  				// Consider chunks which overlap our time range
   561  				// and haven't been flushed.
   562  				// Flushed chunks will already be counted
   563  				// by the TSDB manager+shipper
   564  				chkFrom, chkThrough := chk.chunk.Bounds()
   565  
   566  				if chk.flushed.Equal(zeroValueTime) && from.Before(chkThrough) && through.After(chkFrom) {
   567  					res.Chunks++
   568  					res.Entries += uint64(chk.chunk.Size())
   569  					res.Bytes += uint64(chk.chunk.UncompressedSize())
   570  				}
   571  
   572  			}
   573  			s.chunkMtx.RUnlock()
   574  		}
   575  		return nil
   576  	}); err != nil {
   577  		return nil, err
   578  	}
   579  
   580  	return res, nil
   581  }
   582  
   583  func (i *instance) numStreams() int {
   584  	return i.streams.Len()
   585  }
   586  
   587  // forAllStreams will execute a function for all streams in the instance.
   588  // It uses a callback in order to enable generic stream access without accidentally leaking streams outside the lock.
   589  func (i *instance) forAllStreams(ctx context.Context, fn func(*stream) error) error {
   590  	var chunkFilter chunk.Filterer
   591  	if i.chunkFilter != nil {
   592  		chunkFilter = i.chunkFilter.ForRequest(ctx)
   593  	}
   594  
   595  	err := i.streams.ForEach(func(s *stream) (bool, error) {
   596  		if chunkFilter != nil && chunkFilter.ShouldFilter(s.labels) {
   597  			return true, nil
   598  		}
   599  		err := fn(s)
   600  		if err != nil {
   601  			return false, err
   602  		}
   603  		return true, nil
   604  	})
   605  	if err != nil {
   606  		return err
   607  	}
   608  	return nil
   609  }
   610  
   611  // forMatchingStreams will execute a function for each stream that satisfies a set of requirements (time range, matchers, etc).
   612  // It uses a callback in order to enable generic stream access without accidentally leaking streams outside the lock.
   613  func (i *instance) forMatchingStreams(
   614  	ctx context.Context,
   615  	// ts denotes the beginning of the request
   616  	// and is used to select the correct inverted index
   617  	ts time.Time,
   618  	matchers []*labels.Matcher,
   619  	shards *astmapper.ShardAnnotation,
   620  	fn func(*stream) error,
   621  ) error {
   622  	filters, matchers := util.SplitFiltersAndMatchers(matchers)
   623  	ids, err := i.index.Lookup(ts, matchers, shards)
   624  	if err != nil {
   625  		return err
   626  	}
   627  	var chunkFilter chunk.Filterer
   628  	if i.chunkFilter != nil {
   629  		chunkFilter = i.chunkFilter.ForRequest(ctx)
   630  	}
   631  outer:
   632  	for _, streamID := range ids {
   633  		stream, ok := i.streams.LoadByFP(streamID)
   634  		if !ok {
   635  			// If a stream is missing here, it has already been flushed
   636  			// and is expected to be picked up from storage by the querier
   637  			continue
   638  		}
   639  		for _, filter := range filters {
   640  			if !filter.Matches(stream.labels.Get(filter.Name)) {
   641  				continue outer
   642  			}
   643  		}
   644  		if chunkFilter != nil && chunkFilter.ShouldFilter(stream.labels) {
   645  			continue
   646  		}
   647  		err := fn(stream)
   648  		if err != nil {
   649  			return err
   650  		}
   651  	}
   652  	return nil
   653  }
   654  
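        // addNewTailer attaches the tailer to all currently matching streams and registers it so
        // that streams created later can pick it up via addTailersToNewStream.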
   655  func (i *instance) addNewTailer(ctx context.Context, t *tailer) error {
   656  	if err := i.forMatchingStreams(ctx, time.Now(), t.matchers, nil, func(s *stream) error {
   657  		s.addTailer(t)
   658  		return nil
   659  	}); err != nil {
   660  		return err
   661  	}
   662  	i.tailerMtx.Lock()
   663  	defer i.tailerMtx.Unlock()
   664  	i.tailers[t.getID()] = t
   665  	return nil
   666  }
   667  
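        // addTailersToNewStream attaches every open tailer whose matchers (and chunk filter, if
        // configured) accept the stream's labels to a freshly created stream.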
   668  func (i *instance) addTailersToNewStream(stream *stream) {
   669  	i.tailerMtx.RLock()
   670  	defer i.tailerMtx.RUnlock()
   671  
   672  	for _, t := range i.tailers {
   673  		// We don't want to watch streams for closed tailers.
   674  		// When a new tail request comes in, we clean up references to closed tailers.
   675  		if t.isClosed() {
   676  			continue
   677  		}
   678  		var chunkFilter chunk.Filterer
   679  		if i.chunkFilter != nil {
   680  			chunkFilter = i.chunkFilter.ForRequest(t.conn.Context())
   681  		}
   682  
   683  		if isMatching(stream.labels, t.matchers) {
   684  			if chunkFilter != nil && chunkFilter.ShouldFilter(stream.labels) {
   685  				continue
   686  			}
   687  			stream.addTailer(t)
   688  		}
   689  	}
   690  }
   691  
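        // checkClosedTailers drops references to tailers that have already been closed.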
   692  func (i *instance) checkClosedTailers() {
   693  	closedTailers := []uint32{}
   694  
   695  	i.tailerMtx.RLock()
   696  	for _, t := range i.tailers {
   697  		if t.isClosed() {
   698  			closedTailers = append(closedTailers, t.getID())
   699  			continue
   700  		}
   701  	}
   702  	i.tailerMtx.RUnlock()
   703  
   704  	if len(closedTailers) != 0 {
   705  		i.tailerMtx.Lock()
   706  		defer i.tailerMtx.Unlock()
   707  		for _, closedTailer := range closedTailers {
   708  			delete(i.tailers, closedTailer)
   709  		}
   710  	}
   711  }
   712  
   713  func (i *instance) closeTailers() {
   714  	i.tailerMtx.Lock()
   715  	defer i.tailerMtx.Unlock()
   716  	for _, t := range i.tailers {
   717  		t.close()
   718  	}
   719  }
   720  
   721  func (i *instance) openTailersCount() uint32 {
   722  	i.checkClosedTailers()
   723  
   724  	i.tailerMtx.RLock()
   725  	defer i.tailerMtx.RUnlock()
   726  
   727  	return uint32(len(i.tailers))
   728  }
   729  
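        // parseShardFromRequest parses the shard annotations of a request; at most one shard per
        // ingester query is supported.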
   730  func parseShardFromRequest(reqShards []string) (*astmapper.ShardAnnotation, error) {
   731  	var shard *astmapper.ShardAnnotation
   732  	shards, err := logql.ParseShards(reqShards)
   733  	if err != nil {
   734  		return nil, err
   735  	}
   736  	if len(shards) > 1 {
   737  		return nil, errors.New("only one shard per ingester query is supported")
   738  	}
   739  	if len(shards) == 1 {
   740  		shard = &shards[0]
   741  	}
   742  	return shard, nil
   743  }
   744  
   745  func isDone(ctx context.Context) bool {
   746  	select {
   747  	case <-ctx.Done():
   748  		return true
   749  	default:
   750  		return false
   751  	}
   752  }
   753  
   754  // QuerierQueryServer is the gRPC server stream we use to send batches of entries.
   755  type QuerierQueryServer interface {
   756  	Context() context.Context
   757  	Send(res *logproto.QueryResponse) error
   758  }
   759  
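        // sendBatches reads batches of entries from the iterator and sends them to the query server
        // until the iterator is exhausted, the limit is reached, or the context is done. A limit of
        // zero sends nothing and a negative limit disables the limit.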
   760  func sendBatches(ctx context.Context, i iter.EntryIterator, queryServer QuerierQueryServer, limit int32) error {
   761  	stats := stats.FromContext(ctx)
   762  
   763  	// send until the limit is reached.
   764  	for limit != 0 && !isDone(ctx) {
   765  		fetchSize := uint32(queryBatchSize)
   766  		if limit > 0 {
   767  			fetchSize = math.MinUint32(queryBatchSize, uint32(limit))
   768  		}
   769  		batch, batchSize, err := iter.ReadBatch(i, fetchSize)
   770  		if err != nil {
   771  			return err
   772  		}
   773  
   774  		if limit > 0 {
   775  			limit -= int32(batchSize)
   776  		}
   777  
   778  		if len(batch.Streams) == 0 {
   779  			return nil
   780  		}
   781  
   782  		stats.AddIngesterBatch(int64(batchSize))
   783  		batch.Stats = stats.Ingester()
   784  
   785  		if err := queryServer.Send(batch); err != nil {
   786  			return err
   787  		}
   788  		stats.Reset()
   789  	}
   790  	return nil
   791  }
   792  
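        // sendSampleBatches reads fixed-size batches of samples from the iterator and sends them to
        // the query server until the iterator is exhausted or the context is done.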
   793  func sendSampleBatches(ctx context.Context, it iter.SampleIterator, queryServer logproto.Querier_QuerySampleServer) error {
   794  	stats := stats.FromContext(ctx)
   795  	for !isDone(ctx) {
   796  		batch, size, err := iter.ReadSampleBatch(it, queryBatchSampleSize)
   797  		if err != nil {
   798  			return err
   799  		}
   800  		if len(batch.Series) == 0 {
   801  			return nil
   802  		}
   803  
   804  		stats.AddIngesterBatch(int64(size))
   805  		batch.Stats = stats.Ingester()
   806  
   807  		if err := queryServer.Send(batch); err != nil {
   808  			return err
   809  		}
   810  
   811  		stats.Reset()
   812  
   813  	}
   814  	return nil
   815  }
   816  
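        // shouldConsiderStream reports whether the stream's bounds overlap the request time range.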
   817  func shouldConsiderStream(stream *stream, reqFrom, reqThrough time.Time) bool {
   818  	from, to := stream.Bounds()
   819  
   820  	if reqThrough.UnixNano() > from.UnixNano() && reqFrom.UnixNano() <= to.UnixNano() {
   821  		return true
   822  	}
   823  	return false
   824  }
   825  
   826  // OnceSwitch is a switch that can only ever be flipped "on"; it is safe for concurrent use.
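        //
        // A minimal usage sketch (hypothetical, not taken from this package):
        //
        //	var flushOnShutdown OnceSwitch
        //	flushOnShutdown.TriggerAnd(func() {
        //		// runs only on the first trigger
        //	})
        //	flushOnShutdown.Trigger() // later triggers are no-ops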
   827  type OnceSwitch struct {
   828  	triggered atomic.Bool
   829  }
   830  
   831  func (o *OnceSwitch) Get() bool {
   832  	return o.triggered.Load()
   833  }
   834  
   835  func (o *OnceSwitch) Trigger() {
   836  	o.TriggerAnd(nil)
   837  }
   838  
   839  // TriggerAnd will ensure the switch is on and run the provided function if
   840  // the switch was not already toggled on.
   841  func (o *OnceSwitch) TriggerAnd(fn func()) {
   842  	triggeredPrior := o.triggered.Swap(true)
   843  	if !triggeredPrior && fn != nil {
   844  		fn()
   845  	}
   846  }