github.com/thanos-io/thanos@v0.32.5/pkg/block/block_test.go (about)

     1  // Copyright (c) The Thanos Authors.
     2  // Licensed under the Apache License 2.0.
     3  
     4  package block
     5  
     6  import (
     7  	"bytes"
     8  	"context"
     9  	"encoding/json"
    10  	"fmt"
    11  	"io"
    12  	"os"
    13  	"path"
    14  	"strings"
    15  	"testing"
    16  	"time"
    17  
    18  	"github.com/thanos-io/thanos/pkg/extprom"
    19  
    20  	"github.com/go-kit/log"
    21  	"github.com/oklog/ulid"
    22  	"github.com/pkg/errors"
    23  	"github.com/prometheus/client_golang/prometheus"
    24  	"github.com/prometheus/client_golang/prometheus/promauto"
    25  	promtest "github.com/prometheus/client_golang/prometheus/testutil"
    26  	"github.com/prometheus/prometheus/model/labels"
    27  	"github.com/thanos-io/objstore"
    28  
    29  	"github.com/efficientgo/core/testutil"
    30  	"github.com/thanos-io/thanos/pkg/block/metadata"
    31  	"github.com/thanos-io/thanos/pkg/testutil/custom"
    32  	"github.com/thanos-io/thanos/pkg/testutil/e2eutil"
    33  )
    34  
    35  func TestIsBlockDir(t *testing.T) {
    36  	for _, tc := range []struct {
    37  		input string
    38  		id    ulid.ULID
    39  		bdir  bool
    40  	}{
    41  		{
    42  			input: "",
    43  			bdir:  false,
    44  		},
    45  		{
    46  			input: "something",
    47  			bdir:  false,
    48  		},
    49  		{
    50  			id:    ulid.MustNew(1, nil),
    51  			input: ulid.MustNew(1, nil).String(),
    52  			bdir:  true,
    53  		},
    54  		{
    55  			id:    ulid.MustNew(2, nil),
    56  			input: "/" + ulid.MustNew(2, nil).String(),
    57  			bdir:  true,
    58  		},
    59  		{
    60  			id:    ulid.MustNew(3, nil),
    61  			input: "some/path/" + ulid.MustNew(3, nil).String(),
    62  			bdir:  true,
    63  		},
    64  		{
    65  			input: ulid.MustNew(4, nil).String() + "/something",
    66  			bdir:  false,
    67  		},
    68  	} {
    69  		t.Run(tc.input, func(t *testing.T) {
    70  			id, ok := IsBlockDir(tc.input)
    71  			testutil.Equals(t, tc.bdir, ok)
    72  
    73  			if id.Compare(tc.id) != 0 {
    74  				t.Errorf("expected %s got %s", tc.id, id)
    75  				t.FailNow()
    76  			}
    77  		})
    78  	}
    79  }
    80  
// TestUpload covers Upload end-to-end against an in-memory bucket: the error
// paths for a missing dir, a non-block dir and a partially populated block
// dir; the happy path including the exact meta.json written back with
// gathered per-file stats; idempotency of repeated uploads; and
// external-label validation (Upload rejects label-less blocks while
// UploadPromBlock accepts them).
func TestUpload(t *testing.T) {
	defer custom.TolerantVerifyLeak(t)

	ctx := context.Background()

	tmpDir := t.TempDir()

	bkt := objstore.NewInMemBucket()
	b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
		{{Name: "a", Value: "1"}},
		{{Name: "a", Value: "2"}},
		{{Name: "a", Value: "3"}},
		{{Name: "a", Value: "4"}},
		{{Name: "b", Value: "1"}},
	}, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "val1"}}, 124, metadata.NoneFunc)
	testutil.Ok(t, err)
	// An empty dir named after the block; block files are copied into it one
	// by one below to exercise each missing-file error in turn.
	testutil.Ok(t, os.MkdirAll(path.Join(tmpDir, "test", b1.String()), os.ModePerm))

	{
		// Wrong dir.
		err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "not-existing"), metadata.NoneFunc)
		testutil.NotOk(t, err)
		testutil.Assert(t, strings.HasSuffix(err.Error(), "/not-existing: no such file or directory"), "")
	}
	{
		// Wrong existing dir (not a block).
		err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test"), metadata.NoneFunc)
		testutil.NotOk(t, err)
		testutil.Equals(t, "not a block dir: ulid: bad data size when unmarshaling", err.Error())
	}
	{
		// Empty block dir.
		err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc)
		testutil.NotOk(t, err)
		testutil.Assert(t, strings.HasSuffix(err.Error(), "/meta.json: no such file or directory"), "")
	}
	e2eutil.Copy(t, path.Join(tmpDir, b1.String(), MetaFilename), path.Join(tmpDir, "test", b1.String(), MetaFilename))
	{
		// Missing chunks.
		err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc)
		testutil.NotOk(t, err)
		testutil.Assert(t, strings.HasSuffix(err.Error(), "/chunks: no such file or directory"), err.Error())
	}
	testutil.Ok(t, os.MkdirAll(path.Join(tmpDir, "test", b1.String(), ChunksDirname), os.ModePerm))
	e2eutil.Copy(t, path.Join(tmpDir, b1.String(), ChunksDirname, "000001"), path.Join(tmpDir, "test", b1.String(), ChunksDirname, "000001"))
	{
		// Missing index file.
		err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc)
		testutil.NotOk(t, err)
		testutil.Assert(t, strings.HasSuffix(err.Error(), "/index: no such file or directory"), "")
	}
	e2eutil.Copy(t, path.Join(tmpDir, b1.String(), IndexFilename), path.Join(tmpDir, "test", b1.String(), IndexFilename))
	testutil.Ok(t, os.Remove(path.Join(tmpDir, "test", b1.String(), MetaFilename)))
	{
		// Missing meta.json file.
		err := Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc)
		testutil.NotOk(t, err)
		testutil.Assert(t, strings.HasSuffix(err.Error(), "/meta.json: no such file or directory"), "")
	}
	e2eutil.Copy(t, path.Join(tmpDir, b1.String(), MetaFilename), path.Join(tmpDir, "test", b1.String(), MetaFilename))
	{
		// Full block. Exactly three objects land in the bucket: chunk file,
		// index and the (rewritten) meta.json; sizes are pinned below.
		testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc))
		testutil.Equals(t, 3, len(bkt.Objects()))
		testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")]))
		testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)]))
		testutil.Equals(t, 567, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]))

		// File stats are gathered. The uploaded meta.json must match this
		// exact rendering, including the thanos.files size_bytes entries
		// (meta.json itself gets no size, since it is rewritten on upload).
		testutil.Equals(t, fmt.Sprintf(`{
	"ulid": "%s",
	"minTime": 0,
	"maxTime": 1000,
	"stats": {
		"numSamples": 500,
		"numSeries": 5,
		"numChunks": 5
	},
	"compaction": {
		"level": 1,
		"sources": [
			"%s"
		]
	},
	"version": 1,
	"thanos": {
		"labels": {
			"ext1": "val1"
		},
		"downsample": {
			"resolution": 124
		},
		"source": "test",
		"files": [
			{
				"rel_path": "chunks/000001",
				"size_bytes": 3727
			},
			{
				"rel_path": "index",
				"size_bytes": 401
			},
			{
				"rel_path": "meta.json"
			}
		],
		"index_stats": {}
	}
}
`, b1.String(), b1.String()), string(bkt.Objects()[path.Join(b1.String(), MetaFilename)]))
	}
	{
		// Test Upload is idempotent: re-uploading the same block changes
		// neither the object count nor any object's size.
		testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.String()), metadata.NoneFunc))
		testutil.Equals(t, 3, len(bkt.Objects()))
		testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")]))
		testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)]))
		testutil.Equals(t, 567, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]))
	}
	{
		// Upload with no external labels should be blocked.
		b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
			{{Name: "a", Value: "1"}},
			{{Name: "a", Value: "2"}},
			{{Name: "a", Value: "3"}},
			{{Name: "a", Value: "4"}},
			{{Name: "b", Value: "1"}},
		}, 100, 0, 1000, nil, 124, metadata.NoneFunc)
		testutil.Ok(t, err)
		err = Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc)
		testutil.NotOk(t, err)
		testutil.Equals(t, "empty external labels are not allowed for Thanos block.", err.Error())
		testutil.Equals(t, 3, len(bkt.Objects()))
	}
	{
		// No external labels with UploadPromBlocks: allowed, so the bucket
		// grows from 3 to 6 objects (second block's chunk, index, meta).
		b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
			{{Name: "a", Value: "1"}},
			{{Name: "a", Value: "2"}},
			{{Name: "a", Value: "3"}},
			{{Name: "a", Value: "4"}},
			{{Name: "b", Value: "1"}},
		}, 100, 0, 1000, nil, 124, metadata.NoneFunc)
		testutil.Ok(t, err)
		err = UploadPromBlock(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc)
		testutil.Ok(t, err)
		testutil.Equals(t, 6, len(bkt.Objects()))
		testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b2.String(), ChunksDirname, "000001")]))
		testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b2.String(), IndexFilename)]))
		testutil.Equals(t, 546, len(bkt.Objects()[path.Join(b2.String(), MetaFilename)]))
	}
}
   233  
   234  func TestDelete(t *testing.T) {
   235  	defer custom.TolerantVerifyLeak(t)
   236  	ctx := context.Background()
   237  
   238  	tmpDir := t.TempDir()
   239  
   240  	bkt := objstore.NewInMemBucket()
   241  	{
   242  		b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
   243  			{{Name: "a", Value: "1"}},
   244  			{{Name: "a", Value: "2"}},
   245  			{{Name: "a", Value: "3"}},
   246  			{{Name: "a", Value: "4"}},
   247  			{{Name: "b", Value: "1"}},
   248  		}, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "val1"}}, 124, metadata.NoneFunc)
   249  		testutil.Ok(t, err)
   250  		testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc))
   251  		testutil.Equals(t, 3, len(bkt.Objects()))
   252  
   253  		markedForDeletion := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"})
   254  		testutil.Ok(t, MarkForDeletion(ctx, log.NewNopLogger(), bkt, b1, "", markedForDeletion))
   255  
   256  		// Full delete.
   257  		testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, b1))
   258  		testutil.Equals(t, 0, len(bkt.Objects()))
   259  	}
   260  	{
   261  		b2, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
   262  			{{Name: "a", Value: "1"}},
   263  			{{Name: "a", Value: "2"}},
   264  			{{Name: "a", Value: "3"}},
   265  			{{Name: "a", Value: "4"}},
   266  			{{Name: "b", Value: "1"}},
   267  		}, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "val1"}}, 124, metadata.NoneFunc)
   268  		testutil.Ok(t, err)
   269  		testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), metadata.NoneFunc))
   270  		testutil.Equals(t, 3, len(bkt.Objects()))
   271  
   272  		// Remove meta.json and check if delete can delete it.
   273  		testutil.Ok(t, bkt.Delete(ctx, path.Join(b2.String(), MetaFilename)))
   274  		testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, b2))
   275  		testutil.Equals(t, 0, len(bkt.Objects()))
   276  	}
   277  }
   278  
   279  func TestMarkForDeletion(t *testing.T) {
   280  	defer custom.TolerantVerifyLeak(t)
   281  	ctx := context.Background()
   282  
   283  	tmpDir := t.TempDir()
   284  
   285  	for _, tcase := range []struct {
   286  		name      string
   287  		preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket)
   288  
   289  		blocksMarked int
   290  	}{
   291  		{
   292  			name:         "block marked for deletion",
   293  			preUpload:    func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
   294  			blocksMarked: 1,
   295  		},
   296  		{
   297  			name: "block with deletion mark already, expected log and no metric increment",
   298  			preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {
   299  				deletionMark, err := json.Marshal(metadata.DeletionMark{
   300  					ID:           id,
   301  					DeletionTime: time.Now().Unix(),
   302  					Version:      metadata.DeletionMarkVersion1,
   303  				})
   304  				testutil.Ok(t, err)
   305  				testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.DeletionMarkFilename), bytes.NewReader(deletionMark)))
   306  			},
   307  			blocksMarked: 0,
   308  		},
   309  	} {
   310  		t.Run(tcase.name, func(t *testing.T) {
   311  			bkt := objstore.NewInMemBucket()
   312  			id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
   313  				{{Name: "a", Value: "1"}},
   314  				{{Name: "a", Value: "2"}},
   315  				{{Name: "a", Value: "3"}},
   316  				{{Name: "a", Value: "4"}},
   317  				{{Name: "b", Value: "1"}},
   318  			}, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "val1"}}, 124, metadata.NoneFunc)
   319  			testutil.Ok(t, err)
   320  
   321  			tcase.preUpload(t, id, bkt)
   322  
   323  			testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc))
   324  
   325  			c := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
   326  			err = MarkForDeletion(ctx, log.NewNopLogger(), bkt, id, "", c)
   327  			testutil.Ok(t, err)
   328  			testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c))
   329  		})
   330  	}
   331  }
   332  
   333  func TestMarkForNoCompact(t *testing.T) {
   334  	defer custom.TolerantVerifyLeak(t)
   335  	ctx := context.Background()
   336  
   337  	tmpDir := t.TempDir()
   338  
   339  	for _, tcase := range []struct {
   340  		name      string
   341  		preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket)
   342  
   343  		blocksMarked int
   344  	}{
   345  		{
   346  			name:         "block marked",
   347  			preUpload:    func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
   348  			blocksMarked: 1,
   349  		},
   350  		{
   351  			name: "block with no-compact mark already, expected log and no metric increment",
   352  			preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {
   353  				m, err := json.Marshal(metadata.NoCompactMark{
   354  					ID:            id,
   355  					NoCompactTime: time.Now().Unix(),
   356  					Version:       metadata.NoCompactMarkVersion1,
   357  				})
   358  				testutil.Ok(t, err)
   359  				testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.NoCompactMarkFilename), bytes.NewReader(m)))
   360  			},
   361  			blocksMarked: 0,
   362  		},
   363  	} {
   364  		t.Run(tcase.name, func(t *testing.T) {
   365  			bkt := objstore.NewInMemBucket()
   366  			id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
   367  				{{Name: "a", Value: "1"}},
   368  				{{Name: "a", Value: "2"}},
   369  				{{Name: "a", Value: "3"}},
   370  				{{Name: "a", Value: "4"}},
   371  				{{Name: "b", Value: "1"}},
   372  			}, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "val1"}}, 124, metadata.NoneFunc)
   373  			testutil.Ok(t, err)
   374  
   375  			tcase.preUpload(t, id, bkt)
   376  
   377  			testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc))
   378  
   379  			c := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
   380  			err = MarkForNoCompact(ctx, log.NewNopLogger(), bkt, id, metadata.ManualNoCompactReason, "", c)
   381  			testutil.Ok(t, err)
   382  			testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c))
   383  		})
   384  	}
   385  }
   386  
   387  func TestMarkForNoDownsample(t *testing.T) {
   388  
   389  	defer custom.TolerantVerifyLeak(t)
   390  	ctx := context.Background()
   391  
   392  	tmpDir := t.TempDir()
   393  
   394  	for _, tcase := range []struct {
   395  		name      string
   396  		preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket)
   397  
   398  		blocksMarked int
   399  	}{
   400  		{
   401  			name:         "block marked",
   402  			preUpload:    func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
   403  			blocksMarked: 1,
   404  		},
   405  		{
   406  			name: "block with no-downsample mark already, expected log and no metric increment",
   407  			preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {
   408  				m, err := json.Marshal(metadata.NoDownsampleMark{
   409  					ID:               id,
   410  					NoDownsampleTime: time.Now().Unix(),
   411  					Version:          metadata.NoDownsampleMarkVersion1,
   412  				})
   413  				testutil.Ok(t, err)
   414  				testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.NoDownsampleMarkFilename), bytes.NewReader(m)))
   415  			},
   416  			blocksMarked: 0,
   417  		},
   418  	} {
   419  		t.Run(tcase.name, func(t *testing.T) {
   420  			bkt := objstore.NewInMemBucket()
   421  			id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
   422  				{{Name: "a", Value: "1"}},
   423  				{{Name: "a", Value: "2"}},
   424  				{{Name: "a", Value: "3"}},
   425  				{{Name: "a", Value: "4"}},
   426  				{{Name: "b", Value: "1"}},
   427  			}, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "val1"}}, 124, metadata.NoneFunc)
   428  			testutil.Ok(t, err)
   429  
   430  			tcase.preUpload(t, id, bkt)
   431  
   432  			testutil.Ok(t, Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String()), metadata.NoneFunc))
   433  
   434  			c := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
   435  			err = MarkForNoDownsample(ctx, log.NewNopLogger(), bkt, id, metadata.ManualNoDownsampleReason, "", c)
   436  			testutil.Ok(t, err)
   437  			testutil.Equals(t, float64(tcase.blocksMarked), promtest.ToFloat64(c))
   438  		})
   439  	}
   440  }
   441  
// TestHashDownload uploads a small block (whose meta.json records SHA256
// hashes for its files) to in-memory storage and downloads it repeatedly into
// the same local dir. A file whose local copy already matches the recorded
// hash must not be fetched again; the instrumented bucket's operation
// counters pin down exactly how many "get" calls each Download performs.
func TestHashDownload(t *testing.T) {
	defer custom.TolerantVerifyLeak(t)

	ctx := context.Background()

	tmpDir := t.TempDir()

	bkt := objstore.NewInMemBucket()
	r := prometheus.NewRegistry()
	// Wrap the bucket so every operation is counted under the thanos_ prefix.
	instrumentedBkt := objstore.WrapWithMetrics(bkt, extprom.WrapRegistererWithPrefix("thanos_", r), "test")

	b1, err := e2eutil.CreateBlockWithTombstone(ctx, tmpDir, []labels.Labels{
		{{Name: "a", Value: "1"}},
	}, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "val1"}}, 42, metadata.SHA256Func)
	testutil.Ok(t, err)

	testutil.Ok(t, Upload(ctx, log.NewNopLogger(), instrumentedBkt, path.Join(tmpDir, b1.String()), metadata.SHA256Func))
	testutil.Equals(t, 3, len(bkt.Objects()))

	m, err := DownloadMeta(ctx, log.NewNopLogger(), bkt, b1)
	testutil.Ok(t, err)

	// After upload, every file except meta.json itself must carry a hash.
	for _, fl := range m.Thanos.Files {
		if fl.RelPath == MetaFilename {
			continue
		}
		testutil.Assert(t, fl.Hash != nil, "expected a hash for %s but got nil", fl.RelPath)
	}

	// Remove the hash from one file to check if we always download it.
	m.Thanos.Files[1].Hash = nil

	// Re-upload the doctored meta.json so subsequent Downloads see it.
	metaEncoded := strings.Builder{}
	testutil.Ok(t, m.Write(&metaEncoded))
	testutil.Ok(t, bkt.Upload(ctx, path.Join(b1.String(), MetaFilename), strings.NewReader(metaEncoded.String())))

	// Only downloads MetaFile and IndexFile (the file whose hash was removed);
	// the chunk file matches its recorded hash locally and is skipped.
	{
		err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String()))
		testutil.Ok(t, err)
		testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(`
		# HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket.
        # TYPE thanos_objstore_bucket_operations_total counter
        thanos_objstore_bucket_operations_total{bucket="test",operation="attributes"} 0
        thanos_objstore_bucket_operations_total{bucket="test",operation="delete"} 0
        thanos_objstore_bucket_operations_total{bucket="test",operation="exists"} 0
        thanos_objstore_bucket_operations_total{bucket="test",operation="get"} 2
        thanos_objstore_bucket_operations_total{bucket="test",operation="get_range"} 0
        thanos_objstore_bucket_operations_total{bucket="test",operation="iter"} 2
        thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} 3
		`), `thanos_objstore_bucket_operations_total`))
	}

	// Ensures that we always download MetaFile: deleting only the local
	// meta.json adds exactly two more "get" calls (2 -> 4).
	{
		testutil.Ok(t, os.Remove(path.Join(tmpDir, b1.String(), MetaFilename)))
		err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String()))
		testutil.Ok(t, err)
		testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(`
		# HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket.
        # TYPE thanos_objstore_bucket_operations_total counter
        thanos_objstore_bucket_operations_total{bucket="test",operation="attributes"} 0
        thanos_objstore_bucket_operations_total{bucket="test",operation="delete"} 0
        thanos_objstore_bucket_operations_total{bucket="test",operation="exists"} 0
        thanos_objstore_bucket_operations_total{bucket="test",operation="get"} 4
        thanos_objstore_bucket_operations_total{bucket="test",operation="get_range"} 0
        thanos_objstore_bucket_operations_total{bucket="test",operation="iter"} 4
        thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} 3
		`), `thanos_objstore_bucket_operations_total`))
	}

	// Remove chunks => gets redownloaded.
	// Always downloads MetaFile.
	// Finally, downloads the IndexFile since we have removed its hash.
	{
		testutil.Ok(t, os.RemoveAll(path.Join(tmpDir, b1.String(), ChunksDirname)))
		err = Download(ctx, log.NewNopLogger(), instrumentedBkt, m.ULID, path.Join(tmpDir, b1.String()))
		testutil.Ok(t, err)
		testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(`
			# HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket.
			# TYPE thanos_objstore_bucket_operations_total counter
			thanos_objstore_bucket_operations_total{bucket="test",operation="attributes"} 0
			thanos_objstore_bucket_operations_total{bucket="test",operation="delete"} 0
			thanos_objstore_bucket_operations_total{bucket="test",operation="exists"} 0
			thanos_objstore_bucket_operations_total{bucket="test",operation="get"} 7
			thanos_objstore_bucket_operations_total{bucket="test",operation="get_range"} 0
			thanos_objstore_bucket_operations_total{bucket="test",operation="iter"} 6
			thanos_objstore_bucket_operations_total{bucket="test",operation="upload"} 3
			`), `thanos_objstore_bucket_operations_total`))
	}
}
   536  
// TestUploadCleanup verifies partial-upload cleanup semantics using errBucket
// (defined below), which makes the write succeed and then reports an error
// for objects matching a suffix: a failed index upload rolls the whole block
// back, while a failed meta.json upload intentionally leaves everything in
// place (the block is complete; only the final marker write failed).
func TestUploadCleanup(t *testing.T) {
	defer custom.TolerantVerifyLeak(t)

	ctx := context.Background()

	tmpDir := t.TempDir()

	bkt := objstore.NewInMemBucket()
	b1, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
		{{Name: "a", Value: "1"}},
		{{Name: "a", Value: "2"}},
		{{Name: "a", Value: "3"}},
		{{Name: "a", Value: "4"}},
		{{Name: "b", Value: "1"}},
	}, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "val1"}}, 124, metadata.NoneFunc)
	testutil.Ok(t, err)

	{
		// Fail the upload of the index file.
		errBkt := errBucket{Bucket: bkt, failSuffix: "/index"}

		uploadErr := Upload(ctx, log.NewNopLogger(), errBkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc)
		testutil.Assert(t, errors.Is(uploadErr, errUploadFailed))

		// If upload of index fails, block is deleted.
		testutil.Equals(t, 0, len(bkt.Objects()))
		testutil.Assert(t, len(bkt.Objects()[path.Join(DebugMetas, fmt.Sprintf("%s.json", b1.String()))]) == 0)
	}

	{
		// Fail the upload of the final meta.json (bucket is empty again here
		// because the previous section cleaned everything up).
		errBkt := errBucket{Bucket: bkt, failSuffix: "/meta.json"}

		uploadErr := Upload(ctx, log.NewNopLogger(), errBkt, path.Join(tmpDir, b1.String()), metadata.NoneFunc)
		testutil.Assert(t, errors.Is(uploadErr, errUploadFailed))

		// If upload of meta.json fails, nothing is cleaned up.
		testutil.Equals(t, 3, len(bkt.Objects()))
		testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")]) > 0)
		testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)]) > 0)
		testutil.Assert(t, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]) > 0)
		testutil.Assert(t, len(bkt.Objects()[path.Join(DebugMetas, fmt.Sprintf("%s.json", b1.String()))]) == 0)
	}
}
   579  
   580  var errUploadFailed = errors.New("upload failed")
   581  
   582  type errBucket struct {
   583  	objstore.Bucket
   584  
   585  	failSuffix string
   586  }
   587  
   588  func (eb errBucket) Upload(ctx context.Context, name string, r io.Reader) error {
   589  	err := eb.Bucket.Upload(ctx, name, r)
   590  	if err != nil {
   591  		return err
   592  	}
   593  
   594  	if strings.HasSuffix(name, eb.failSuffix) {
   595  		return errUploadFailed
   596  	}
   597  	return nil
   598  }
   599  
   600  func TestRemoveMarkForDeletion(t *testing.T) {
   601  	defer custom.TolerantVerifyLeak(t)
   602  	ctx := context.Background()
   603  	tmpDir := t.TempDir()
   604  	for _, testcases := range []struct {
   605  		name           string
   606  		preDelete      func(t testing.TB, id ulid.ULID, bkt objstore.Bucket)
   607  		blocksUnmarked int
   608  	}{
   609  		{
   610  			name: "unmarked block for deletion",
   611  			preDelete: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {
   612  				deletionMark, err := json.Marshal(metadata.DeletionMark{
   613  					ID:           id,
   614  					DeletionTime: time.Now().Unix(),
   615  					Version:      metadata.DeletionMarkVersion1,
   616  				})
   617  				testutil.Ok(t, err)
   618  				testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.DeletionMarkFilename), bytes.NewReader(deletionMark)))
   619  			},
   620  			blocksUnmarked: 1,
   621  		},
   622  		{
   623  			name:           "block not marked for deletion, message logged and metric not incremented",
   624  			preDelete:      func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
   625  			blocksUnmarked: 0,
   626  		},
   627  	} {
   628  		t.Run(testcases.name, func(t *testing.T) {
   629  			bkt := objstore.NewInMemBucket()
   630  			id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
   631  				{{Name: "cluster-eu1", Value: "service-1"}},
   632  				{{Name: "cluster-eu1", Value: "service-2"}},
   633  				{{Name: "cluster-eu1", Value: "service-3"}},
   634  				{{Name: "cluster-us1", Value: "service-1"}},
   635  				{{Name: "cluster-us1", Value: "service-2"}},
   636  			}, 100, 0, 1000, labels.Labels{{Name: "region-1", Value: "eu-west"}}, 124, metadata.NoneFunc)
   637  			testutil.Ok(t, err)
   638  			testcases.preDelete(t, id, bkt)
   639  			counter := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
   640  			err = RemoveMark(ctx, log.NewNopLogger(), bkt, id, counter, metadata.DeletionMarkFilename)
   641  			testutil.Ok(t, err)
   642  			testutil.Equals(t, float64(testcases.blocksUnmarked), promtest.ToFloat64(counter))
   643  		})
   644  	}
   645  }
   646  
   647  func TestRemoveMarkForNoCompact(t *testing.T) {
   648  	defer custom.TolerantVerifyLeak(t)
   649  	ctx := context.Background()
   650  	tmpDir := t.TempDir()
   651  	for _, testCases := range []struct {
   652  		name           string
   653  		preDelete      func(t testing.TB, id ulid.ULID, bkt objstore.Bucket)
   654  		blocksUnmarked int
   655  	}{
   656  		{
   657  			name: "unmarked block for no-compact",
   658  			preDelete: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {
   659  				m, err := json.Marshal(metadata.NoCompactMark{
   660  					ID:            id,
   661  					NoCompactTime: time.Now().Unix(),
   662  					Version:       metadata.NoCompactMarkVersion1,
   663  				})
   664  				testutil.Ok(t, err)
   665  				testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.NoCompactMarkFilename), bytes.NewReader(m)))
   666  			},
   667  			blocksUnmarked: 1,
   668  		},
   669  		{
   670  			name:           "block not marked for no-compact, message logged and metric not incremented",
   671  			preDelete:      func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
   672  			blocksUnmarked: 0,
   673  		},
   674  	} {
   675  		t.Run(testCases.name, func(t *testing.T) {
   676  			bkt := objstore.NewInMemBucket()
   677  			id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
   678  				{{Name: "cluster-eu1", Value: "service-1"}},
   679  				{{Name: "cluster-eu1", Value: "service-2"}},
   680  				{{Name: "cluster-eu1", Value: "service-3"}},
   681  				{{Name: "cluster-us1", Value: "service-1"}},
   682  				{{Name: "cluster-us1", Value: "service-2"}},
   683  			}, 100, 0, 1000, labels.Labels{{Name: "region-1", Value: "eu-west"}}, 124, metadata.NoneFunc)
   684  			testutil.Ok(t, err)
   685  			testCases.preDelete(t, id, bkt)
   686  			counter := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
   687  			err = RemoveMark(ctx, log.NewNopLogger(), bkt, id, counter, metadata.NoCompactMarkFilename)
   688  			testutil.Ok(t, err)
   689  			testutil.Equals(t, float64(testCases.blocksUnmarked), promtest.ToFloat64(counter))
   690  		})
   691  	}
   692  }
   693  
   694  func TestRemoveMmarkForNoDownsample(t *testing.T) {
   695  	defer custom.TolerantVerifyLeak(t)
   696  	ctx := context.Background()
   697  	tmpDir := t.TempDir()
   698  	for _, testCases := range []struct {
   699  		name           string
   700  		preDelete      func(t testing.TB, id ulid.ULID, bkt objstore.Bucket)
   701  		blocksUnmarked int
   702  	}{
   703  		{
   704  			name: "unmarked block for no-downsample",
   705  			preDelete: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {
   706  				m, err := json.Marshal(metadata.NoDownsampleMark{
   707  					ID:               id,
   708  					NoDownsampleTime: time.Now().Unix(),
   709  					Version:          metadata.NoDownsampleMarkVersion1,
   710  				})
   711  				testutil.Ok(t, err)
   712  				testutil.Ok(t, bkt.Upload(ctx, path.Join(id.String(), metadata.NoDownsampleMarkFilename), bytes.NewReader(m)))
   713  			},
   714  			blocksUnmarked: 1,
   715  		},
   716  		{
   717  			name:           "block not marked for no-downsample, message logged and metric not incremented",
   718  			preDelete:      func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
   719  			blocksUnmarked: 0,
   720  		},
   721  	} {
   722  		t.Run(testCases.name, func(t *testing.T) {
   723  			bkt := objstore.NewInMemBucket()
   724  			id, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{
   725  				{{Name: "cluster-eu1", Value: "service-1"}},
   726  				{{Name: "cluster-eu1", Value: "service-2"}},
   727  				{{Name: "cluster-eu1", Value: "service-3"}},
   728  				{{Name: "cluster-us1", Value: "service-1"}},
   729  				{{Name: "cluster-us1", Value: "service-2"}},
   730  			}, 100, 0, 1000, labels.Labels{{Name: "region-1", Value: "eu-west"}}, 124, metadata.NoneFunc)
   731  			testutil.Ok(t, err)
   732  			testCases.preDelete(t, id, bkt)
   733  			counter := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
   734  			err = RemoveMark(ctx, log.NewNopLogger(), bkt, id, counter, metadata.NoDownsampleMarkFilename)
   735  			testutil.Ok(t, err)
   736  			testutil.Equals(t, float64(testCases.blocksUnmarked), promtest.ToFloat64(counter))
   737  		})
   738  	}
   739  }