github.com/thanos-io/thanos@v0.32.5/pkg/testutil/e2eutil/prometheus.go

// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.

package e2eutil

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/oklog/ulid"
	"github.com/pkg/errors"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/index"
	"go.uber.org/atomic"
	"golang.org/x/sync/errgroup"

	"github.com/efficientgo/core/testutil"

	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/runutil"
)

const (
	defaultPrometheusVersion   = "v0.37.0"
	defaultAlertmanagerVersion = "v0.20.0"
	defaultMinioVersion        = "RELEASE.2022-07-30T05-21-40Z"

	// Space-delimited list of paths to Prometheus binaries to test against.
	promPathsEnvVar       = "THANOS_TEST_PROMETHEUS_PATHS"
	alertmanagerBinEnvVar = "THANOS_TEST_ALERTMANAGER_PATH"
	minioBinEnvVar        = "THANOS_TEST_MINIO_PATH"

	// A placeholder for the actual Prometheus instance address in the scrape config.
	PromAddrPlaceHolder = "PROMETHEUS_ADDRESS"
)

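// histogramSample is a fixed native histogram sample that the block-creation
// helpers below append for every histogram series.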
var histogramSample = histogram.Histogram{
	Schema:        0,
	Count:         9,
	Sum:           -3.1415,
	ZeroCount:     12,
	ZeroThreshold: 0.001,
	NegativeSpans: []histogram.Span{
		{Offset: 0, Length: 4},
		{Offset: 1, Length: 1},
	},
	NegativeBuckets: []int64{1, 2, -2, 1, -1},
}

func PrometheusBinary() string {
	return "prometheus-" + defaultPrometheusVersion
}

func AlertmanagerBinary() string {
	b := os.Getenv(alertmanagerBinEnvVar)
	if b == "" {
		return fmt.Sprintf("alertmanager-%s", defaultAlertmanagerVersion)
	}
	return b
}

func MinioBinary() string {
	b := os.Getenv(minioBinEnvVar)
	if b == "" {
		return fmt.Sprintf("minio-%s", defaultMinioVersion)
	}
	return b
}
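
// AlertmanagerBinary and MinioBinary resolve the test binary from the
// environment first and fall back to a versioned default name, while the
// Prometheus binaries under test are taken from promPathsEnvVar by
// ForeachPrometheus. A test environment might therefore be prepared roughly
// like this (paths and versions are illustrative only):
//
//	export THANOS_TEST_ALERTMANAGER_PATH=/usr/local/bin/alertmanager
//	export THANOS_TEST_MINIO_PATH=/usr/local/bin/minio
//	export THANOS_TEST_PROMETHEUS_PATHS="prometheus-v2.41.0 prometheus-v2.45.0"
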

// Prometheus represents a test instance for integration testing.
// It can be populated with data before being started.
type Prometheus struct {
	dir     string
	db      *tsdb.DB
	prefix  string
	binPath string

	running            bool
	cmd                *exec.Cmd
	disabledCompaction bool
	addr               string

	config string
}

func NewTSDB() (*tsdb.DB, error) {
	dir, err := os.MkdirTemp("", "prometheus-test")
	if err != nil {
		return nil, err
	}
	opts := tsdb.DefaultOptions()
	opts.RetentionDuration = math.MaxInt64
	return tsdb.Open(dir, nil, nil, opts, nil)
}

func ForeachPrometheus(t *testing.T, testFn func(t testing.TB, p *Prometheus)) {
	paths := os.Getenv(promPathsEnvVar)
	if paths == "" {
		paths = PrometheusBinary()
	}

	for _, path := range strings.Split(paths, " ") {
		if ok := t.Run(path, func(t *testing.T) {
			p, err := newPrometheus(path, "")
			testutil.Ok(t, err)

			testFn(t, p)
			testutil.Ok(t, p.Stop())
		}); !ok {
			return
		}
	}
}
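// For example (illustrative sketch only, names are hypothetical), a test can
// exercise every configured Prometheus version like this:
//
//	func TestWithEachPrometheus(t *testing.T) {
//		e2eutil.ForeachPrometheus(t, func(t testing.TB, p *e2eutil.Prometheus) {
//			testutil.Ok(t, p.Start())
//			// ... query p.Addr() ...
//		})
//	}
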

// NewPrometheus creates a new test Prometheus instance that will listen on a local address.
// Use ForeachPrometheus if you want to test against a set of Prometheus versions.
// TODO(bwplotka): Improve it with https://github.com/thanos-io/thanos/issues/758.
func NewPrometheus() (*Prometheus, error) {
	return newPrometheus("", "")
}

// NewPrometheusOnPath creates a new test Prometheus instance that will listen on a local address under the given prefix path.
func NewPrometheusOnPath(prefix string) (*Prometheus, error) {
	return newPrometheus("", prefix)
}

func newPrometheus(binPath, prefix string) (*Prometheus, error) {
	if binPath == "" {
		binPath = PrometheusBinary()
	}

	db, err := NewTSDB()
	if err != nil {
		return nil, err
	}

	f, err := os.Create(filepath.Join(db.Dir(), "prometheus.yml"))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Some well-known external labels so that we can test label resorting.
	if _, err = io.WriteString(f, "global:\n  external_labels:\n    region: eu-west"); err != nil {
		return nil, err
	}

	return &Prometheus{
		dir:     db.Dir(),
		db:      db,
		prefix:  prefix,
		binPath: binPath,
		addr:    "<prometheus-not-started>",
	}, nil
}

// Start runs the Prometheus instance and returns; use WaitPrometheusUp to wait until it is ready.
func (p *Prometheus) Start() error {
	if p.running {
		return errors.New("Already started")
	}

	if err := p.db.Close(); err != nil {
		return err
	}
	return p.start()
}

func (p *Prometheus) start() error {
	p.running = true

	port, err := FreePort()
	if err != nil {
		return err
	}

	var extra []string
	if p.disabledCompaction {
		extra = append(extra,
			"--storage.tsdb.min-block-duration=2h",
			"--storage.tsdb.max-block-duration=2h",
		)
	}
	p.addr = fmt.Sprintf("localhost:%d", port)
	// Write the final config to the config file.
	// The address placeholder will be replaced with the actual address.
	if err := p.writeConfig(strings.ReplaceAll(p.config, PromAddrPlaceHolder, p.addr)); err != nil {
		return err
	}
	args := append([]string{
		"--storage.tsdb.retention=2d", // Pass retention because Prometheus since 2.8.0 does not show the default value for this flag in web/api: https://github.com/prometheus/prometheus/pull/5433.
		"--storage.tsdb.path=" + p.db.Dir(),
		"--web.listen-address=" + p.addr,
		"--web.route-prefix=" + p.prefix,
		"--web.enable-admin-api",
		"--config.file=" + filepath.Join(p.db.Dir(), "prometheus.yml"),
	}, extra...)

	p.cmd = exec.Command(p.binPath, args...)
	p.cmd.SysProcAttr = SysProcAttr()

	go func() {
		if b, err := p.cmd.CombinedOutput(); err != nil {
			fmt.Fprintln(os.Stderr, "running Prometheus failed", err)
			fmt.Fprintln(os.Stderr, string(b))
		}
	}()
	time.Sleep(2 * time.Second)

	return nil
}

func (p *Prometheus) WaitPrometheusUp(ctx context.Context, logger log.Logger) error {
	if !p.running {
		return errors.New("method Start was not invoked.")
	}
	return runutil.Retry(time.Second, ctx.Done(), func() error {
		r, err := http.Get(fmt.Sprintf("http://%s/-/ready", p.addr))
		if err != nil {
			return err
		}
		defer runutil.ExhaustCloseWithLogOnErr(logger, r.Body, "failed to exhaust and close body")

		if r.StatusCode != 200 {
			return errors.Errorf("got non-200 response: %v", r.StatusCode)
		}
		return nil
	})
}

func (p *Prometheus) Restart() error {
	if err := p.cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return errors.Wrap(err, "failed to kill Prometheus. Kill it manually")
	}
	_ = p.cmd.Wait()
	return p.start()
}

// Dir returns the TSDB dir.
func (p *Prometheus) Dir() string {
	return p.dir
}

// Addr returns the address the instance listens on; it is correct only after Start has been called.
func (p *Prometheus) Addr() string {
	return p.addr + p.prefix
}

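// DisableCompaction makes the instance start with equal minimum and maximum
// block durations (2h), which effectively disables local compaction.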
func (p *Prometheus) DisableCompaction() {
	p.disabledCompaction = true
}

// SetConfig updates the contents of the config.
func (p *Prometheus) SetConfig(s string) {
	p.config = s
}
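
// A config passed to SetConfig may use PromAddrPlaceHolder wherever the
// instance's own address is needed; the placeholder is substituted when the
// instance starts. An illustrative (not authoritative) self-scrape config:
//
//	p.SetConfig(`
//	global:
//	  external_labels:
//	    region: eu-west
//	scrape_configs:
//	- job_name: self
//	  static_configs:
//	  - targets: ['PROMETHEUS_ADDRESS']
//	`)
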

// writeConfig writes the Prometheus config to the config file.
func (p *Prometheus) writeConfig(config string) (err error) {
	f, err := os.Create(filepath.Join(p.dir, "prometheus.yml"))
	if err != nil {
		return err
	}
	defer runutil.CloseWithErrCapture(&err, f, "prometheus config")
	_, err = f.Write([]byte(config))
	return err
}

// Stop terminates Prometheus and cleans up its data directory.
func (p *Prometheus) Stop() error {
	if !p.running {
		return nil
	}

	if p.cmd.Process != nil {
		if err := p.cmd.Process.Signal(syscall.SIGTERM); err != nil {
			return errors.Wrapf(err, "failed to stop Prometheus. Kill it manually and clean %s dir", p.db.Dir())
		}
	}
	time.Sleep(time.Second / 2)
	return p.cleanup()
}

func (p *Prometheus) cleanup() error {
	p.running = false
	return os.RemoveAll(p.db.Dir())
}

// Appender returns a new appender to populate the Prometheus instance with data.
// All appenders must be closed before Start is called and no new ones must be opened
// afterwards.
func (p *Prometheus) Appender() storage.Appender {
	if p.running {
		panic("Appender must not be called after start")
	}
	return p.db.Appender(context.Background())
}
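
// Typical lifecycle (illustrative sketch only): populate the TSDB through
// Appender, then start the instance, wait for readiness, and stop it when done.
//
//	p, err := e2eutil.NewPrometheus()
//	testutil.Ok(t, err)
//	app := p.Appender()
//	_, err = app.Append(0, labels.FromStrings("__name__", "up"), 0, 1)
//	testutil.Ok(t, err)
//	testutil.Ok(t, app.Commit())
//	testutil.Ok(t, p.Start())
//	testutil.Ok(t, p.WaitPrometheusUp(context.Background(), log.NewNopLogger()))
//	defer func() { testutil.Ok(t, p.Stop()) }()
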

// CreateEmptyBlock produces an empty block like the ones written before the fix: https://github.com/prometheus/tsdb/pull/374
// (Prometheus pre v2.7.0).
func CreateEmptyBlock(dir string, mint, maxt int64, extLset labels.Labels, resolution int64) (ulid.ULID, error) {
	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
	uid := ulid.MustNew(ulid.Now(), entropy)

	if err := os.Mkdir(path.Join(dir, uid.String()), os.ModePerm); err != nil {
		return ulid.ULID{}, errors.Wrap(err, "create block dir")
	}

	if err := os.Mkdir(path.Join(dir, uid.String(), "chunks"), os.ModePerm); err != nil {
		return ulid.ULID{}, errors.Wrap(err, "create chunks dir")
	}

	w, err := index.NewWriter(context.Background(), path.Join(dir, uid.String(), "index"))
	if err != nil {
		return ulid.ULID{}, errors.Wrap(err, "new index")
	}

	if err := w.Close(); err != nil {
		return ulid.ULID{}, errors.Wrap(err, "close index")
	}

	m := tsdb.BlockMeta{
		Version: 1,
		ULID:    uid,
		MinTime: mint,
		MaxTime: maxt,
		Compaction: tsdb.BlockMetaCompaction{
			Level:   1,
			Sources: []ulid.ULID{uid},
		},
	}
	b, err := json.Marshal(&m)
	if err != nil {
		return ulid.ULID{}, err
	}

	if err := os.WriteFile(path.Join(dir, uid.String(), "meta.json"), b, os.ModePerm); err != nil {
		return ulid.ULID{}, errors.Wrap(err, "saving meta.json")
	}

	if _, err = metadata.InjectThanos(log.NewNopLogger(), filepath.Join(dir, uid.String()), metadata.Thanos{
		Labels:     extLset.Map(),
		Downsample: metadata.ThanosDownsample{Resolution: resolution},
		Source:     metadata.TestSource,
	}, nil); err != nil {
		return ulid.ULID{}, errors.Wrap(err, "finalize block")
	}

	return uid, nil
}

// CreateBlock writes a block with the given series and numSamples samples each.
// Samples will be in the time range [mint, maxt).
func CreateBlock(
	ctx context.Context,
	dir string,
	series []labels.Labels,
	numSamples int,
	mint, maxt int64,
	extLset labels.Labels,
	resolution int64,
	hashFunc metadata.HashFunc,
) (id ulid.ULID, err error) {
	return createBlock(ctx, dir, series, numSamples, mint, maxt, extLset, resolution, false, hashFunc, chunkenc.ValFloat)
}
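
// For example (illustrative sketch only), a one-hour block with two series and
// 120 float samples each, no extra external labels, raw resolution and no file
// hashes could be created as:
//
//	series := []labels.Labels{
//		labels.FromStrings("a", "1"),
//		labels.FromStrings("a", "2"),
//	}
//	id, err := e2eutil.CreateBlock(ctx, dir, series, 120,
//		0, int64(time.Hour/time.Millisecond), labels.EmptyLabels(), 0, metadata.NoneFunc)
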

// CreateBlockWithTombstone is the same as CreateBlock but leaves tombstones, which mimics the Prometheus local block.
func CreateBlockWithTombstone(
	ctx context.Context,
	dir string,
	series []labels.Labels,
	numSamples int,
	mint, maxt int64,
	extLset labels.Labels,
	resolution int64,
	hashFunc metadata.HashFunc,
) (id ulid.ULID, err error) {
	return createBlock(ctx, dir, series, numSamples, mint, maxt, extLset, resolution, true, hashFunc, chunkenc.ValFloat)
}

// CreateBlockWithBlockDelay writes a block with the given series and numSamples samples each.
// Samples will be in the time range [mint, maxt).
// The block ID is backdated by blockDelay.
func CreateBlockWithBlockDelay(
	ctx context.Context,
	dir string,
	series []labels.Labels,
	numSamples int,
	mint, maxt int64,
	blockDelay time.Duration,
	extLset labels.Labels,
	resolution int64,
	hashFunc metadata.HashFunc,
) (ulid.ULID, error) {
	return createBlockWithDelay(ctx, dir, series, numSamples, mint, maxt, blockDelay, extLset, resolution, hashFunc, chunkenc.ValFloat)
}

// CreateHistogramBlockWithDelay writes a block with the given native histogram series and numSamples samples each.
// Samples will be in the time range [mint, maxt).
// The block ID is backdated by blockDelay.
func CreateHistogramBlockWithDelay(
	ctx context.Context,
	dir string,
	series []labels.Labels,
	numSamples int,
	mint, maxt int64,
	blockDelay time.Duration,
	extLset labels.Labels,
	resolution int64,
	hashFunc metadata.HashFunc,
) (id ulid.ULID, err error) {
	return createBlockWithDelay(ctx, dir, series, numSamples, mint, maxt, blockDelay, extLset, resolution, hashFunc, chunkenc.ValHistogram)
}

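// createBlockWithDelay writes a block via createBlock and then rewrites its
// ULID so that the block ID appears blockDelay older than the creation time,
// updating meta.json and renaming the block directory accordingly.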
func createBlockWithDelay(ctx context.Context, dir string, series []labels.Labels, numSamples int, mint int64, maxt int64, blockDelay time.Duration, extLset labels.Labels, resolution int64, hashFunc metadata.HashFunc, samplesType chunkenc.ValueType) (ulid.ULID, error) {
	blockID, err := createBlock(ctx, dir, series, numSamples, mint, maxt, extLset, resolution, false, hashFunc, samplesType)
	if err != nil {
		return ulid.ULID{}, errors.Wrap(err, "block creation")
	}

	id, err := ulid.New(uint64(timestamp.FromTime(timestamp.Time(int64(blockID.Time())).Add(-blockDelay))), bytes.NewReader(blockID.Entropy()))
	if err != nil {
		return ulid.ULID{}, errors.Wrap(err, "create block id")
	}

	m, err := metadata.ReadFromDir(path.Join(dir, blockID.String()))
	if err != nil {
		return ulid.ULID{}, errors.Wrap(err, "open meta file")
	}

	m.ULID = id
	m.Compaction.Sources = []ulid.ULID{id}

	if err := m.WriteToDir(log.NewNopLogger(), path.Join(dir, blockID.String())); err != nil {
		return ulid.ULID{}, errors.Wrap(err, "write meta.json file")
	}

	return id, os.Rename(path.Join(dir, blockID.String()), path.Join(dir, id.String()))
}

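// createBlock appends the given series to a temporary TSDB head, compacts the
// head into a single block under dir, optionally keeps tombstones, records
// per-file hashes when requested and injects the Thanos metadata section.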
func createBlock(
	ctx context.Context,
	dir string,
	series []labels.Labels,
	numSamples int,
	mint, maxt int64,
	extLset labels.Labels,
	resolution int64,
	tombstones bool,
	hashFunc metadata.HashFunc,
	sampleType chunkenc.ValueType,
) (id ulid.ULID, err error) {
	headOpts := tsdb.DefaultHeadOptions()
	headOpts.ChunkDirRoot = filepath.Join(dir, "chunks")
	headOpts.ChunkRange = 10000000000
	headOpts.EnableNativeHistograms = *atomic.NewBool(true)
	h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
	if err != nil {
		return id, errors.Wrap(err, "create head block")
	}
	defer func() {
		runutil.CloseWithErrCapture(&err, h, "TSDB Head")
		if e := os.RemoveAll(headOpts.ChunkDirRoot); e != nil {
			err = errors.Wrap(e, "delete chunks dir")
		}
	}()

	var g errgroup.Group
	var timeStepSize = (maxt - mint) / int64(numSamples+1)
	var batchSize = len(series) / runtime.GOMAXPROCS(0)
	r := rand.New(rand.NewSource(int64(numSamples)))
	var randMutex sync.Mutex

	for len(series) > 0 {
		l := batchSize
		if len(series) < 1000 {
			l = len(series)
		}
		batch := series[:l]
		series = series[l:]

		g.Go(func() error {
			t := mint

			for i := 0; i < numSamples; i++ {
				app := h.Appender(ctx)

				for _, lset := range batch {
					sort.Slice(lset, func(i, j int) bool {
						return lset[i].Name < lset[j].Name
					})

					var err error
					if sampleType == chunkenc.ValFloat {
						randMutex.Lock()
						_, err = app.Append(0, lset, t, r.Float64())
						randMutex.Unlock()
					} else if sampleType == chunkenc.ValHistogram {
						_, err = app.AppendHistogram(0, lset, t, &histogramSample, nil)
					}
					if err != nil {
						if rerr := app.Rollback(); rerr != nil {
							err = errors.Wrapf(err, "rollback failed: %v", rerr)
						}

						return errors.Wrap(err, "add sample")
					}
				}
				if err := app.Commit(); err != nil {
					return errors.Wrap(err, "commit")
				}
				t += timeStepSize
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return id, err
	}
	c, err := tsdb.NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{maxt - mint}, nil, nil)
	if err != nil {
		return id, errors.Wrap(err, "create compactor")
	}

	id, err = c.Write(dir, h, mint, maxt, nil)
	if err != nil {
		return id, errors.Wrap(err, "write block")
	}

	if id.Compare(ulid.ULID{}) == 0 {
		return id, errors.Errorf("nothing to write, asked for %d samples", numSamples)
	}

	blockDir := filepath.Join(dir, id.String())

	files := []metadata.File{}
	if hashFunc != metadata.NoneFunc {
		paths := []string{}
		if err := filepath.Walk(blockDir, func(path string, info os.FileInfo, err error) error {
			if info.IsDir() {
				return nil
			}
			paths = append(paths, path)
			return nil
		}); err != nil {
			return id, errors.Wrapf(err, "walking %s", dir)
		}

		for _, p := range paths {
			pHash, err := metadata.CalculateHash(p, metadata.SHA256Func, log.NewNopLogger())
			if err != nil {
				return id, errors.Wrapf(err, "calculating hash of %s", p)
			}
			files = append(files, metadata.File{
				RelPath: strings.TrimPrefix(p, blockDir+"/"),
				Hash:    &pHash,
			})
		}
	}

	if _, err = metadata.InjectThanos(log.NewNopLogger(), blockDir, metadata.Thanos{
		Labels:     extLset.Map(),
		Downsample: metadata.ThanosDownsample{Resolution: resolution},
		Source:     metadata.TestSource,
		Files:      files,
	}, nil); err != nil {
		return id, errors.Wrap(err, "finalize block")
	}

	if !tombstones {
		if err = os.Remove(filepath.Join(dir, id.String(), "tombstones")); err != nil {
			return id, errors.Wrap(err, "remove tombstones")
		}
	}

	return id, nil
}

var indexFilename = "index"

type indexWriterSeries struct {
	labels labels.Labels
	chunks []chunks.Meta // series file offset of chunks
}

type indexWriterSeriesSlice []*indexWriterSeries

// PutOutOfOrderIndex updates the index in blockDir with an index containing an out-of-order chunk,
// copied from https://github.com/prometheus/prometheus/blob/b1ed4a0a663d0c62526312311c7529471abbc565/tsdb/index/index_test.go#L346.
func PutOutOfOrderIndex(blockDir string, minTime int64, maxTime int64) error {

	if minTime >= maxTime || minTime+4 >= maxTime {
		return fmt.Errorf("minTime must be at least 4 less than maxTime to not create overlapping chunks")
	}

	lbls := []labels.Labels{
		[]labels.Label{
			{Name: "lbl1", Value: "1"},
		},
	}

	// Sort labels as the index writer expects series in sorted order.
	sort.Sort(labels.Slice(lbls))

	symbols := map[string]struct{}{}
	for _, lset := range lbls {
		for _, l := range lset {
			symbols[l.Name] = struct{}{}
			symbols[l.Value] = struct{}{}
		}
	}

	var input indexWriterSeriesSlice

	// Generate ChunkMetas for every label set.
	// Ignoring gosec as it is only used for tests.
	for _, lset := range lbls {
		var metas []chunks.Meta
		// only need two chunks that are out-of-order
		chk1 := chunks.Meta{
			MinTime: maxTime - 2,
			MaxTime: maxTime - 1,
			Ref:     chunks.ChunkRef(rand.Uint64()), // nolint:gosec
			Chunk:   chunkenc.NewXORChunk(),
		}
		metas = append(metas, chk1)
		chk2 := chunks.Meta{
			MinTime: minTime + 1,
			MaxTime: minTime + 2,
			Ref:     chunks.ChunkRef(rand.Uint64()), // nolint:gosec
			Chunk:   chunkenc.NewXORChunk(),
		}
		metas = append(metas, chk2)

		input = append(input, &indexWriterSeries{
			labels: lset,
			chunks: metas,
		})
	}

	iw, err := index.NewWriter(context.Background(), filepath.Join(blockDir, indexFilename))
	if err != nil {
		return err
	}

	syms := []string{}
	for s := range symbols {
		syms = append(syms, s)
	}
	sort.Strings(syms)
	for _, s := range syms {
		if err := iw.AddSymbol(s); err != nil {
			return err
		}
	}

	// Population procedure as done by compaction.
	var (
		postings = index.NewMemPostings()
		values   = map[string]map[string]struct{}{}
	)

	for i, s := range input {
		if err := iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...); err != nil {
			return err
		}

		for _, l := range s.labels {
			valset, ok := values[l.Name]
			if !ok {
				valset = map[string]struct{}{}
				values[l.Name] = valset
			}
			valset[l.Value] = struct{}{}
		}
		postings.Add(storage.SeriesRef(i), s.labels)
	}

	return iw.Close()
}
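
// For example (illustrative sketch only), a test can create a valid block and
// then corrupt its index so that verification or repair code paths see
// out-of-order chunks:
//
//	id, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.EmptyLabels(), 0, metadata.NoneFunc)
//	testutil.Ok(t, err)
//	testutil.Ok(t, e2eutil.PutOutOfOrderIndex(filepath.Join(dir, id.String()), 0, 1000))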