github.com/thanos-io/thanos@v0.32.5/pkg/shipper/shipper_e2e_test.go

// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.

package shipper

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/thanos-io/thanos/pkg/extprom"

	"github.com/go-kit/log"
	"github.com/oklog/ulid"
	"github.com/prometheus/client_golang/prometheus"
	promtest "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/tsdb"

	"github.com/thanos-io/objstore"
	"github.com/thanos-io/objstore/objtesting"

	"github.com/efficientgo/core/testutil"
	"github.com/thanos-io/thanos/pkg/block"
	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/testutil/e2eutil"
)

func TestShipper_SyncBlocks_e2e(t *testing.T) {
	objtesting.ForeachStore(t, func(t *testing.T, bkt objstore.Bucket) {
		// TODO(GiedriusS): consider switching to WrapWithMetrics() everywhere?
		metrics := prometheus.NewRegistry()
		metricsBucket := objstore.WrapWithMetrics(bkt, extprom.WrapRegistererWithPrefix("thanos_", metrics), "test")

		dir := t.TempDir()

		extLset := labels.FromStrings("prometheus", "prom-1")
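		// New's trailing arguments are, in order: the metadata source type, an
		// optional func() bool gating uploads of compacted blocks (nil here,
		// which leaves them disabled, as this test relies on), the
		// allowOutOfOrderUploads flag, and the hash function used for files
		// recorded in uploaded metas.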
		shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, metricsBucket, func() labels.Labels { return extLset }, metadata.TestSource, nil, false, metadata.NoneFunc)

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		// Create 10 new blocks. 9 of them (the non-compacted ones) should actually be uploaded.
		var (
			expBlocks    = map[ulid.ULID]struct{}{}
			expFiles     = map[string][]byte{}
			randr        = rand.New(rand.NewSource(0))
			now          = time.Now()
			ids          = []ulid.ULID{}
			maxSyncSoFar int64
		)
		for i := 0; i < 10; i++ {
			id := ulid.MustNew(uint64(i), randr)

			bdir := filepath.Join(dir, id.String())
			tmp := bdir + ".tmp"

			testutil.Ok(t, os.Mkdir(tmp, 0777))

			meta := metadata.Meta{
				BlockMeta: tsdb.BlockMeta{
					Version: 1,
					ULID:    id,
					Stats: tsdb.BlockStats{
						NumSamples: 1,
					},
					MinTime: timestamp.FromTime(now.Add(time.Duration(i) * time.Hour)),
					MaxTime: timestamp.FromTime(now.Add((time.Duration(i) * time.Hour) + 1)),

					Compaction: tsdb.BlockMetaCompaction{
						Level: 1,
					},
				},
				Thanos: metadata.Thanos{
					Source: metadata.TestSource,
				},
			}

			// The sixth block is a compacted one (level > 1); with compacted
			// uploads disabled it must be skipped.
			if i == 5 {
				meta.Compaction.Level = 2
			}

			metab, err := json.Marshal(&meta)
			testutil.Ok(t, err)

			testutil.Ok(t, os.WriteFile(tmp+"/meta.json", metab, 0666))
			testutil.Ok(t, os.WriteFile(tmp+"/index", []byte("indexcontents"), 0666))

			// Running the shipper while a block is still being written to the
			// temp dir must not trigger an upload.
			b, err := shipper.Sync(ctx)
			testutil.Ok(t, err)
			testutil.Equals(t, 0, b)

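			// ReadMetaFile loads the shipper's local state file
			// (thanos.shipper.json in dir), which records the ULIDs already
			// uploaded so that Sync stays idempotent across restarts.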
			shipMeta, err := ReadMetaFile(dir)
			testutil.Ok(t, err)
			if len(shipMeta.Uploaded) == 0 {
				shipMeta.Uploaded = []ulid.ULID{}
			}
			testutil.Equals(t, &Meta{Version: MetaVersion1, Uploaded: ids}, shipMeta)

			testutil.Ok(t, os.MkdirAll(tmp+"/chunks", 0777))
			testutil.Ok(t, os.WriteFile(tmp+"/chunks/0001", []byte("chunkcontents1"), 0666))
			testutil.Ok(t, os.WriteFile(tmp+"/chunks/0002", []byte("chunkcontents2"), 0666))

			testutil.Ok(t, os.Rename(tmp, bdir))

			// After the rename the block directory is complete, so Sync should
			// upload it.
			b, err = shipper.Sync(ctx)
			testutil.Ok(t, err)

			if i != 5 {
				ids = append(ids, id)
				maxSyncSoFar = meta.MaxTime
				testutil.Equals(t, 1, b)
			} else {
				// 5 blocks have been uploaded so far: 5 existence checks & 20
				// uploads (4 files each: two chunk files, the index, and meta.json).
				testutil.Ok(t, promtest.GatherAndCompare(metrics, strings.NewReader(`
				# HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket.
				# TYPE thanos_objstore_bucket_operations_total counter
				thanos_objstore_bucket_operations_total{bucket="test",operation="attributes"} 0
				thanos_objstore_bucket_operations_total{bucket="test",operation="delete"} 0
				thanos_objstore_bucket_operations_total{bucket="test",operation="exists"} 5
				thanos_objstore_bucket_operations_total{bucket="test",operation="get"} 0
				thanos_objstore_bucket_operations_total{bucket="test",operation="get_range"} 0
				thanos_objstore_bucket_operations_total{bucket="test",operation="iter"} 0
				thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} 20
				`), `thanos_objstore_bucket_operations_total`))
				testutil.Equals(t, 0, b)
			}

			// The external labels must be attached to the meta file on upload.
			meta.Thanos.Labels = extLset.Map()
			meta.Thanos.SegmentFiles = []string{"0001", "0002"}
			meta.Thanos.Files = []metadata.File{
				{RelPath: "chunks/0001", SizeBytes: 14},
				{RelPath: "chunks/0002", SizeBytes: 14},
				{RelPath: "index", SizeBytes: 13},
				{RelPath: "meta.json"},
			}

			buf := bytes.Buffer{}
			testutil.Ok(t, meta.Write(&buf))

			// We will delete the fifth block (i == 4) and do not expect it to be
			// re-uploaded later; the sixth (i == 5) is compacted and is never
			// uploaded in the first place.
			if i != 4 && i != 5 {
				expBlocks[id] = struct{}{}

				expFiles[id.String()+"/meta.json"] = buf.Bytes()
				expFiles[id.String()+"/index"] = []byte("indexcontents")
				expFiles[id.String()+"/chunks/0001"] = []byte("chunkcontents1")
				expFiles[id.String()+"/chunks/0002"] = []byte("chunkcontents2")
			}
			if i == 4 {
				testutil.Ok(t, block.Delete(ctx, log.NewNopLogger(), bkt, ids[4]))
			}
			// The shipper meta file should show all blocks as uploaded except the compacted one.
			shipMeta, err = ReadMetaFile(dir)
			testutil.Ok(t, err)
			testutil.Equals(t, &Meta{Version: MetaVersion1, Uploaded: ids}, shipMeta)

			// Verify timestamps were updated correctly.
			minTotal, maxSync, err := shipper.Timestamps()
			testutil.Ok(t, err)
			testutil.Equals(t, timestamp.FromTime(now), minTotal)
			testutil.Equals(t, maxSyncSoFar, maxSync)
		}

		for id := range expBlocks {
			ok, _ := bkt.Exists(ctx, path.Join(id.String(), block.MetaFilename))
			testutil.Assert(t, ok, "block %s was not uploaded", id)
		}
		for fn, exp := range expFiles {
			rc, err := bkt.Get(ctx, fn)
			testutil.Ok(t, err)
			act, err := io.ReadAll(rc)
			testutil.Ok(t, err)
			testutil.Ok(t, rc.Close())
			testutil.Equals(t, string(exp), string(act))
		}
		// Verify that the deleted fifth block is still absent at the end.
		ok, err := bkt.Exists(ctx, ids[4].String()+"/meta.json")
		testutil.Ok(t, err)
		testutil.Assert(t, !ok, "fifth block was reuploaded")
	})
}
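
// The loop above drives the full shipper lifecycle by hand. The sketch below
// is a hypothetical condensation of basic package usage, not part of the
// original suite; the function name and flow are illustrative only.
func exampleShipperUsage(ctx context.Context, dir string) error {
	bkt := objstore.NewInMemBucket() // any objstore.Bucket implementation works
	s := New(log.NewNopLogger(), nil, dir, bkt,
		func() labels.Labels { return labels.FromStrings("prometheus", "prom-1") },
		metadata.TestSource, nil, false, metadata.NoneFunc)

	// Sync scans dir for complete, ULID-named block directories and uploads
	// any that are not yet recorded in the local shipper meta file.
	uploaded, err := s.Sync(ctx)
	if err != nil {
		return err
	}
	_ = uploaded // number of blocks shipped in this pass
	return nil
}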

func TestShipper_SyncBlocksWithMigrating_e2e(t *testing.T) {
	e2eutil.ForeachPrometheus(t, func(t testing.TB, p *e2eutil.Prometheus) {
		dir := t.TempDir()

		bkt := objstore.NewInMemBucket()

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		extLset := labels.FromStrings("prometheus", "prom-1")

		testutil.Ok(t, p.Start())

		logger := log.NewNopLogger()
		upctx, upcancel := context.WithTimeout(ctx, 10*time.Second)
		defer upcancel()
		testutil.Ok(t, p.WaitPrometheusUp(upctx, logger))

		p.DisableCompaction()
		testutil.Ok(t, p.Restart())

		upctx2, upcancel2 := context.WithTimeout(ctx, 10*time.Second)
		defer upcancel2()
		testutil.Ok(t, p.WaitPrometheusUp(upctx2, logger))

		uploadCompactedFunc := func() bool { return true }
		shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, bkt, func() labels.Labels { return extLset }, metadata.TestSource, uploadCompactedFunc, false, metadata.NoneFunc)

		// Create 10 new blocks. All of them (including the compacted one) should
		// be uploaded, since uploading compacted blocks is enabled here.
		var (
			expBlocks = map[ulid.ULID]struct{}{}
			expFiles  = map[string][]byte{}
			randr     = rand.New(rand.NewSource(0))
			now       = time.Now()
			ids       = []ulid.ULID{}
		)
		for i := 0; i < 10; i++ {
			id := ulid.MustNew(uint64(i), randr)

			bdir := filepath.Join(dir, id.String())
			tmp := bdir + ".tmp"

			testutil.Ok(t, os.Mkdir(tmp, 0777))

			meta := metadata.Meta{
				BlockMeta: tsdb.BlockMeta{
					Version: 1,
					ULID:    id,
					Stats: tsdb.BlockStats{
						NumSamples: 1,
					},
					MinTime: timestamp.FromTime(now.Add(time.Duration(i) * time.Hour)),
					MaxTime: timestamp.FromTime(now.Add((time.Duration(i) * time.Hour) + 1)),

					Compaction: tsdb.BlockMetaCompaction{
						Level: 1,
					},
				},
				Thanos: metadata.Thanos{
					Source: metadata.TestSource,
				},
			}

			// The fifth block is a compacted one; with uploadCompactedFunc
			// returning true it is shipped like any other block.
			if i == 4 {
				meta.Compaction.Level = 2
			}

			metab, err := json.Marshal(&meta)
			testutil.Ok(t, err)

			testutil.Ok(t, os.WriteFile(tmp+"/meta.json", metab, 0666))
			testutil.Ok(t, os.WriteFile(tmp+"/index", []byte("indexcontents"), 0666))

			// Running the shipper while a block is still being written to the
			// temp dir must not trigger an upload.
			b, err := shipper.Sync(ctx)
			testutil.Ok(t, err)
			testutil.Equals(t, 0, b)

			shipMeta, err := ReadMetaFile(dir)
			testutil.Ok(t, err)
			if len(shipMeta.Uploaded) == 0 {
				shipMeta.Uploaded = []ulid.ULID{}
			}
			testutil.Equals(t, &Meta{Version: MetaVersion1, Uploaded: ids}, shipMeta)

			testutil.Ok(t, os.MkdirAll(tmp+"/chunks", 0777))
			testutil.Ok(t, os.WriteFile(tmp+"/chunks/0001", []byte("chunkcontents1"), 0666))
			testutil.Ok(t, os.WriteFile(tmp+"/chunks/0002", []byte("chunkcontents2"), 0666))

			testutil.Ok(t, os.Rename(tmp, bdir))

			// After the rename the block directory is complete, so Sync should
			// upload it.
			b, err = shipper.Sync(ctx)
			testutil.Ok(t, err)
			testutil.Equals(t, 1, b)
			ids = append(ids, id)

			// The external labels must be attached to the meta file on upload.
			meta.Thanos.Labels = extLset.Map()
			meta.Thanos.SegmentFiles = []string{"0001", "0002"}
			meta.Thanos.Files = []metadata.File{
				{RelPath: "chunks/0001", SizeBytes: 14},
				{RelPath: "chunks/0002", SizeBytes: 14},
				{RelPath: "index", SizeBytes: 13},
				{RelPath: "meta.json"},
			}

			buf := bytes.Buffer{}
			testutil.Ok(t, meta.Write(&buf))

			// We will delete the fifth block and do not expect it to be re-uploaded later.
			if i != 4 {
				expBlocks[id] = struct{}{}

				expFiles[id.String()+"/meta.json"] = buf.Bytes()
				expFiles[id.String()+"/index"] = []byte("indexcontents")
				expFiles[id.String()+"/chunks/0001"] = []byte("chunkcontents1")
				expFiles[id.String()+"/chunks/0002"] = []byte("chunkcontents2")
			}
			if i == 4 {
				testutil.Ok(t, block.Delete(ctx, log.NewNopLogger(), bkt, ids[4]))
			}
			// The shipper meta file should show all blocks as uploaded, including
			// the compacted one.
			shipMeta, err = ReadMetaFile(dir)
			testutil.Ok(t, err)
			testutil.Equals(t, &Meta{Version: MetaVersion1, Uploaded: ids}, shipMeta)

			// Verify timestamps were updated correctly.
			minTotal, maxSync, err := shipper.Timestamps()
			testutil.Ok(t, err)
			testutil.Equals(t, timestamp.FromTime(now), minTotal)
			testutil.Equals(t, meta.MaxTime, maxSync)
		}

		for id := range expBlocks {
			ok, _ := bkt.Exists(ctx, path.Join(id.String(), block.MetaFilename))
			testutil.Assert(t, ok, "block %s was not uploaded", id)
		}
		for fn, exp := range expFiles {
			rc, err := bkt.Get(ctx, fn)
			testutil.Ok(t, err)
			act, err := io.ReadAll(rc)
			testutil.Ok(t, err)
			testutil.Ok(t, rc.Close())
			testutil.Equals(t, string(exp), string(act))
		}
		// Verify that the deleted fifth block is still absent at the end.
		ok, err := bkt.Exists(ctx, ids[4].String()+"/meta.json")
		testutil.Ok(t, err)
		testutil.Assert(t, !ok, "fifth block was reuploaded")
	})
}
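
// Because New takes uploadCompacted as a func() bool rather than a plain
// bool, the decision can in principle be re-evaluated over time rather than
// fixed at construction. A hedged, hypothetical sketch of wiring it to a
// caller-owned flag (names are illustrative only):
func exampleToggleCompactedUploads(dir string, bkt objstore.Bucket, uploadCompacted *bool) *Shipper {
	return New(log.NewNopLogger(), nil, dir, bkt,
		func() labels.Labels { return labels.FromStrings("prometheus", "prom-1") },
		metadata.TestSource,
		func() bool { return *uploadCompacted }, // read whenever the shipper consults it
		false, metadata.NoneFunc)
}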

// TestShipper_SyncOverlapBlocks_e2e exercises the allowOutOfOrderUploads flag,
// which allows compacted blocks (compaction level greater than 1) to be
// uploaded despite overlapping time ranges.
func TestShipper_SyncOverlapBlocks_e2e(t *testing.T) {
	p, err := e2eutil.NewPrometheus()
	testutil.Ok(t, err)
	dir := t.TempDir()

	bkt := objstore.NewInMemBucket()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	extLset := labels.FromStrings("prometheus", "prom-1")

	testutil.Ok(t, p.Start())

	logger := log.NewNopLogger()
	upctx, upcancel := context.WithTimeout(ctx, 10*time.Second)
	defer upcancel()
	testutil.Ok(t, p.WaitPrometheusUp(upctx, logger))

	p.DisableCompaction()
	testutil.Ok(t, p.Restart())

	upctx2, upcancel2 := context.WithTimeout(ctx, 10*time.Second)
	defer upcancel2()
	testutil.Ok(t, p.WaitPrometheusUp(upctx2, logger))

	uploadCompactedFunc := func() bool { return true }
	// Here, the allowOutOfOrderUploads flag is set to true, which allows blocks with overlaps to be uploaded.
	shipper := New(log.NewLogfmtLogger(os.Stderr), nil, dir, bkt, func() labels.Labels { return extLset }, metadata.TestSource, uploadCompactedFunc, true, metadata.NoneFunc)

	// Create 2 overlapping blocks; both should be uploaded since out-of-order uploads are allowed.
	var (
		expBlocks = map[ulid.ULID]struct{}{}
		expFiles  = map[string][]byte{}
		randr     = rand.New(rand.NewSource(0))
		ids       = []ulid.ULID{}
	)

	id := make([]ulid.ULID, 2)
	tmp := make([]string, 2)
	m := make([]metadata.Meta, 2)

	for i := 0; i < 2; i++ {
		id[i] = ulid.MustNew(uint64(i), randr)

		bdir := filepath.Join(dir, id[i].String())
		// Note: this string shadows the tmp slice declared above; the slice
		// entries are only populated in the second loop below.
		tmp := bdir + ".tmp"

		testutil.Ok(t, os.Mkdir(tmp, 0777))

		m[i] = metadata.Meta{
			BlockMeta: tsdb.BlockMeta{
				Version: 1,
				ULID:    id[i],
				Stats: tsdb.BlockStats{
					NumSamples: 1,
				},
				Compaction: tsdb.BlockMetaCompaction{
					Level: 2,
				},
			},
			Thanos: metadata.Thanos{
				Source: metadata.TestSource,
			},
		}
	}

	// The two blocks overlap: [15, 17] lies inside [10, 20].
	m[0].BlockMeta.MinTime = 10
	m[0].BlockMeta.MaxTime = 20

	m[1].BlockMeta.MinTime = 15
	m[1].BlockMeta.MaxTime = 17

	for i := 0; i < 2; i++ {
		bdir := filepath.Join(dir, m[i].BlockMeta.ULID.String())
		tmp[i] = bdir + ".tmp"

		metab, err := json.Marshal(&m[i])
		testutil.Ok(t, err)

		testutil.Ok(t, os.WriteFile(tmp[i]+"/meta.json", metab, 0666))
		testutil.Ok(t, os.WriteFile(tmp[i]+"/index", []byte("indexcontents"), 0666))

		// Running the shipper while a block is still being written to the
		// temp dir must not trigger an upload.
		b, err := shipper.Sync(ctx)
		testutil.Ok(t, err)
		testutil.Equals(t, 0, b)

		shipMeta, err := ReadMetaFile(dir)
		testutil.Ok(t, err)
		if len(shipMeta.Uploaded) == 0 {
			shipMeta.Uploaded = []ulid.ULID{}
		}
		testutil.Equals(t, &Meta{Version: MetaVersion1, Uploaded: ids}, shipMeta)

		testutil.Ok(t, os.MkdirAll(tmp[i]+"/chunks", 0777))
		testutil.Ok(t, os.WriteFile(tmp[i]+"/chunks/0001", []byte("chunkcontents1"), 0666))
		testutil.Ok(t, os.WriteFile(tmp[i]+"/chunks/0002", []byte("chunkcontents2"), 0666))

		testutil.Ok(t, os.Rename(tmp[i], bdir))

		// After the rename the block directory is complete, so Sync should
		// upload it despite the overlap.
		b, err = shipper.Sync(ctx)
		testutil.Ok(t, err)
		testutil.Equals(t, 1, b)
		ids = append(ids, id[i])

		// The external labels must be attached to the meta file on upload.
		m[i].Thanos.Labels = extLset.Map()
		m[i].Thanos.SegmentFiles = []string{"0001", "0002"}
		m[i].Thanos.Files = []metadata.File{
			{RelPath: "chunks/0001", SizeBytes: 14},
			{RelPath: "chunks/0002", SizeBytes: 14},
			{RelPath: "index", SizeBytes: 13},
			{RelPath: "meta.json"},
		}

		buf := bytes.Buffer{}
		testutil.Ok(t, m[i].Write(&buf))

		expBlocks[id[i]] = struct{}{}
		expFiles[id[i].String()+"/meta.json"] = buf.Bytes()
		expFiles[id[i].String()+"/index"] = []byte("indexcontents")
		expFiles[id[i].String()+"/chunks/0001"] = []byte("chunkcontents1")
		expFiles[id[i].String()+"/chunks/0002"] = []byte("chunkcontents2")

		// The shipper meta file should show all blocks as uploaded; both are
		// compacted and out-of-order uploads are allowed.
		shipMeta, err = ReadMetaFile(dir)
		testutil.Ok(t, err)
		testutil.Equals(t, &Meta{Version: MetaVersion1, Uploaded: ids}, shipMeta)
	}

	for id := range expBlocks {
		ok, _ := bkt.Exists(ctx, path.Join(id.String(), block.MetaFilename))
		testutil.Assert(t, ok, "block %s was not uploaded", id)
	}

	for fn, exp := range expFiles {
		rc, err := bkt.Get(ctx, fn)
		testutil.Ok(t, err)
		act, err := io.ReadAll(rc)
		testutil.Ok(t, err)
		testutil.Ok(t, rc.Close())
		testutil.Equals(t, string(exp), string(act))
	}
}
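
// The SizeBytes values asserted throughout these tests follow from the
// fixture bytes written above; meta.json is listed without a size, presumably
// because the shipper rewrites it with Thanos metadata on upload. A small,
// hypothetical helper making that arithmetic explicit:
func exampleExpectedFileSizes() []metadata.File {
	return []metadata.File{
		{RelPath: "chunks/0001", SizeBytes: int64(len("chunkcontents1"))}, // 14
		{RelPath: "chunks/0002", SizeBytes: int64(len("chunkcontents2"))}, // 14
		{RelPath: "index", SizeBytes: int64(len("indexcontents"))},        // 13
		{RelPath: "meta.json"}, // size intentionally omitted; see note above
	}
}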