github.com/codingfuture/orig-energi3@v0.8.4/swarm/storage/ldbstore_test.go (about)

     1  // Copyright 2018 The Energi Core Authors
     2  // Copyright 2018 The go-ethereum Authors
     3  // This file is part of the Energi Core library.
     4  //
     5  // The Energi Core library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The Energi Core library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package storage
    19  
    20  import (
    21  	"bytes"
    22  	"context"
    23  	"encoding/binary"
    24  	"fmt"
    25  	"io/ioutil"
    26  	"os"
    27  	"strconv"
    28  	"strings"
    29  	"testing"
    30  	"time"
    31  
    32  	"github.com/ethereum/go-ethereum/common"
    33  	ch "github.com/ethereum/go-ethereum/swarm/chunk"
    34  	"github.com/ethereum/go-ethereum/swarm/log"
    35  	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
    36  	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
    37  )
    38  
    39  type testDbStore struct {
    40  	*LDBStore
    41  	dir string
    42  }
    43  
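        // newTestDbStore creates an LDBStore rooted in a fresh temporary directory.
        // When mock is true the chunk data is kept in an in-memory mock global store;
        // otherwise a plain LevelDB-backed store is used. The returned cleanup function
        // closes the store and removes the temporary directory. The trusted parameter
        // is currently unused.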
    44  func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
    45  	dir, err := ioutil.TempDir("", "bzz-storage-test")
    46  	if err != nil {
    47  		return nil, func() {}, err
    48  	}
    49  
    50  	var db *LDBStore
    51  	storeparams := NewDefaultStoreParams()
    52  	params := NewLDBStoreParams(storeparams, dir)
    53  	params.Po = testPoFunc
    54  
    55  	if mock {
    56  		globalStore := mem.NewGlobalStore()
    57  		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
    58  		mockStore := globalStore.NewNodeStore(addr)
    59  
    60  		db, err = NewMockDbStore(params, mockStore)
    61  	} else {
    62  		db, err = NewLDBStore(params)
    63  	}
    64  
    65  	cleanup := func() {
    66  		if db != nil {
    67  			db.Close()
    68  		}
    69  		err = os.RemoveAll(dir)
    70  		if err != nil {
    71  			panic(fmt.Sprintf("db cleanup failed: %v", err))
    72  		}
    73  	}
    74  
    75  	return &testDbStore{db, dir}, cleanup, err
    76  }
    77  
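        // testPoFunc computes a chunk address's proximity order relative to an
        // all-zero 32-byte base key; the test store uses it to assign chunks to bins.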
    78  func testPoFunc(k Address) (ret uint8) {
    79  	basekey := make([]byte, 32)
    80  	return uint8(Proximity(basekey, k[:]))
    81  }
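
        // As a rough illustration only (not part of the original helpers, and assuming
        // nothing beyond the standard library): "proximity" between two equal-length
        // keys can be thought of as the number of leading bits they share, e.g.:
        //
        //	func leadingMatchingBits(a, b []byte) int {
        //		for i := range a {
        //			if x := a[i] ^ b[i]; x != 0 {
        //				// bits matching before the first differing bit in this byte
        //				return i*8 + bits.LeadingZeros8(x)
        //			}
        //		}
        //		return len(a) * 8 // identical keys
        //	}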
    82  
    83  func testDbStoreRandom(n int, mock bool, t *testing.T) {
    84  	db, cleanup, err := newTestDbStore(mock, true)
    85  	defer cleanup()
    86  	if err != nil {
    87  		t.Fatalf("init dbStore failed: %v", err)
    88  	}
    89  	testStoreRandom(db, n, t)
    90  }
    91  
    92  func testDbStoreCorrect(n int, mock bool, t *testing.T) {
    93  	db, cleanup, err := newTestDbStore(mock, false)
    94  	defer cleanup()
    95  	if err != nil {
    96  		t.Fatalf("init dbStore failed: %v", err)
    97  	}
    98  	testStoreCorrect(db, n, t)
    99  }
   100  
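        // TestMarkAccessed verifies that MarkAccessed bumps a stored chunk's access
        // counter in its index entry once the current write batch has been flushed.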
   101  func TestMarkAccessed(t *testing.T) {
   102  	db, cleanup, err := newTestDbStore(false, true)
   103  	defer cleanup()
   104  	if err != nil {
   105  		t.Fatalf("init dbStore failed: %v", err)
   106  	}
   107  
   108  	h := GenerateRandomChunk(ch.DefaultSize)
   109  
   110  	db.Put(context.Background(), h)
   111  
   112  	var index dpaDBIndex
   113  	addr := h.Address()
   114  	idxk := getIndexKey(addr)
   115  
   116  	idata, err := db.db.Get(idxk)
   117  	if err != nil {
   118  		t.Fatal(err)
   119  	}
   120  	decodeIndex(idata, &index)
   121  
   122  	if index.Access != 0 {
   123  		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
   124  	}
   125  
   126  	db.MarkAccessed(addr)
   127  	db.writeCurrentBatch()
   128  
   129  	idata, err = db.db.Get(idxk)
   130  	if err != nil {
   131  		t.Fatal(err)
   132  	}
   133  	decodeIndex(idata, &index)
   134  
   135  	if index.Access != 1 {
   136  		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
   137  	}
   138  
   139  }
   140  
   141  func TestDbStoreRandom_1(t *testing.T) {
   142  	testDbStoreRandom(1, false, t)
   143  }
   144  
   145  func TestDbStoreCorrect_1(t *testing.T) {
   146  	testDbStoreCorrect(1, false, t)
   147  }
   148  
   149  func TestDbStoreRandom_1k(t *testing.T) {
   150  	testDbStoreRandom(1000, false, t)
   151  }
   152  
   153  func TestDbStoreCorrect_1k(t *testing.T) {
   154  	testDbStoreCorrect(1000, false, t)
   155  }
   156  
   157  func TestMockDbStoreRandom_1(t *testing.T) {
   158  	testDbStoreRandom(1, true, t)
   159  }
   160  
   161  func TestMockDbStoreCorrect_1(t *testing.T) {
   162  	testDbStoreCorrect(1, true, t)
   163  }
   164  
   165  func TestMockDbStoreRandom_1k(t *testing.T) {
   166  	testDbStoreRandom(1000, true, t)
   167  }
   168  
   169  func TestMockDbStoreCorrect_1k(t *testing.T) {
   170  	testDbStoreCorrect(1000, true, t)
   171  }
   172  
   173  func testDbStoreNotFound(t *testing.T, mock bool) {
   174  	db, cleanup, err := newTestDbStore(mock, false)
   175  	defer cleanup()
   176  	if err != nil {
   177  		t.Fatalf("init dbStore failed: %v", err)
   178  	}
   179  
   180  	_, err = db.Get(context.TODO(), ZeroAddr)
   181  	if err != ErrChunkNotFound {
   182  		t.Errorf("Expected ErrChunkNotFound, got %v", err)
   183  	}
   184  }
   185  
   186  func TestDbStoreNotFound(t *testing.T) {
   187  	testDbStoreNotFound(t, false)
   188  }
   189  func TestMockDbStoreNotFound(t *testing.T) {
   190  	testDbStoreNotFound(t, true)
   191  }
   192  
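        // testIterator puts chunkcount random chunks and then walks every proximity
        // order bin with SyncIterator, checking that each stored key is reported back
        // under the index it was originally stored at.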
   193  func testIterator(t *testing.T, mock bool) {
   194  	var chunkcount int = 32
   195  	var i int
   196  	var poc uint
   197  	chunkkeys := NewAddressCollection(chunkcount)
   198  	chunkkeys_results := NewAddressCollection(chunkcount)
   199  
   200  	db, cleanup, err := newTestDbStore(mock, false)
   201  	defer cleanup()
   202  	if err != nil {
   203  		t.Fatalf("init dbStore failed: %v", err)
   204  	}
   205  
   206  	chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)
   207  
   208  	for i = 0; i < len(chunks); i++ {
   209  		chunkkeys[i] = chunks[i].Address()
   210  		err := db.Put(context.TODO(), chunks[i])
   211  		if err != nil {
   212  			t.Fatalf("dbStore.Put failed: %v", err)
   213  		}
   214  	}
   215  
   216  	for i = 0; i < len(chunkkeys); i++ {
   217  		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
   218  	}
   219  	i = 0
   220  	for poc = 0; poc <= 255; poc++ {
   221  		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
   222  			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
   223  			chunkkeys_results[n] = k
   224  			i++
   225  			return true
   226  		})
   227  		if err != nil {
   228  			t.Fatalf("Iterator call failed: %v", err)
   229  		}
   230  	}
   231  
   232  	for i = 0; i < chunkcount; i++ {
   233  		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
   234  			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
   235  		}
   236  	}
   237  
   238  }
   239  
   240  func TestIterator(t *testing.T) {
   241  	testIterator(t, false)
   242  }
   243  func TestMockIterator(t *testing.T) {
   244  	testIterator(t, true)
   245  }
   246  
   247  func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
   248  	db, cleanup, err := newTestDbStore(mock, true)
   249  	defer cleanup()
   250  	if err != nil {
   251  		b.Fatalf("init dbStore failed: %v", err)
   252  	}
   253  	benchmarkStorePut(db, n, b)
   254  }
   255  
   256  func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
   257  	db, cleanup, err := newTestDbStore(mock, true)
   258  	defer cleanup()
   259  	if err != nil {
   260  		b.Fatalf("init dbStore failed: %v", err)
   261  	}
   262  	benchmarkStoreGet(db, n, b)
   263  }
   264  
   265  func BenchmarkDbStorePut_500(b *testing.B) {
   266  	benchmarkDbStorePut(500, false, b)
   267  }
   268  
   269  func BenchmarkDbStoreGet_500(b *testing.B) {
   270  	benchmarkDbStoreGet(500, false, b)
   271  }
   272  
   273  func BenchmarkMockDbStorePut_500(b *testing.B) {
   274  	benchmarkDbStorePut(500, true, b)
   275  }
   276  
   277  func BenchmarkMockDbStoreGet_500(b *testing.B) {
   278  	benchmarkDbStoreGet(500, true, b)
   279  }
   280  
    281  // TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store and
    282  // retrieve them again, provided we stay below the capacity and never trigger garbage collection
   283  func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
   284  	capacity := 50
   285  	n := 10
   286  
   287  	ldb, cleanup := newLDBStore(t)
   288  	ldb.setCapacity(uint64(capacity))
   289  	defer cleanup()
   290  
   291  	chunks, err := mputRandomChunks(ldb, n)
   292  	if err != nil {
   293  		t.Fatal(err.Error())
   294  	}
   295  
   296  	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
   297  
   298  	for _, ch := range chunks {
   299  		ret, err := ldb.Get(context.TODO(), ch.Address())
   300  		if err != nil {
   301  			t.Fatal(err)
   302  		}
   303  
   304  		if !bytes.Equal(ret.Data(), ch.Data()) {
    305  			t.Fatal("expected to get the same data back, but got something else")
   306  		}
   307  	}
   308  
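        	// every Put increments both entryCnt and accessCnt, and every Get increments
        	// accessCnt again, so n puts followed by n gets should leave entryCnt == n
        	// and accessCnt == 2*n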
   309  	if ldb.entryCnt != uint64(n) {
   310  		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
   311  	}
   312  
   313  	if ldb.accessCnt != uint64(2*n) {
   314  		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
   315  	}
   316  }
   317  
    318  // TestLDBStoreCollectGarbage tests that we can put more chunks than the store's capacity and then
    319  // retrieve only some of them, because garbage collection must have partially cleared the store.
    320  // It also tests that we can delete chunks and explicitly trigger garbage collection.
   321  func TestLDBStoreCollectGarbage(t *testing.T) {
   322  
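        	// each subtest name encodes the capacity and the number of chunks to put
        	// ("<variant>/<capacity>/<n>"); the test bodies parse these values back out
        	// of t.Name() with strings.Split
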
    323  	// below max round
   324  	initialCap := defaultMaxGCRound / 100
   325  	cap := initialCap / 2
   326  	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
   327  	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
   328  
   329  	// at max round
   330  	cap = initialCap
   331  	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
   332  	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
   333  
    334  	// more than max round, not on threshold
   335  	cap = initialCap + 500
   336  	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
   337  	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
   338  
   339  }
   340  
   341  func testLDBStoreCollectGarbage(t *testing.T) {
   342  	params := strings.Split(t.Name(), "/")
   343  	capacity, err := strconv.Atoi(params[2])
   344  	if err != nil {
   345  		t.Fatal(err)
   346  	}
   347  	n, err := strconv.Atoi(params[3])
   348  	if err != nil {
   349  		t.Fatal(err)
   350  	}
   351  
   352  	ldb, cleanup := newLDBStore(t)
   353  	ldb.setCapacity(uint64(capacity))
   354  	defer cleanup()
   355  
   356  	// retrieve the gc round target count for the db capacity
   357  	ldb.startGC(capacity)
   358  	roundTarget := ldb.gc.target
   359  
    360  	// split the puts into batches of at most the gc round target, and wait for gc to finish in between
   361  	var allChunks []Chunk
   362  	remaining := n
   363  	for remaining > 0 {
   364  		var putCount int
   365  		if remaining < roundTarget {
   366  			putCount = remaining
   367  		} else {
   368  			putCount = roundTarget
   369  		}
   370  		remaining -= putCount
   371  		chunks, err := mputRandomChunks(ldb, putCount)
   372  		if err != nil {
   373  			t.Fatal(err.Error())
   374  		}
   375  		allChunks = append(allChunks, chunks...)
   376  		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
   377  
   378  		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
   379  		defer cancel()
   380  		waitGc(ctx, ldb)
   381  	}
   382  
   383  	// attempt gets on all put chunks
   384  	var missing int
   385  	for _, ch := range allChunks {
   386  		ret, err := ldb.Get(context.TODO(), ch.Address())
   387  		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
   388  			missing++
   389  			continue
   390  		}
   391  		if err != nil {
   392  			t.Fatal(err)
   393  		}
   394  
   395  		if !bytes.Equal(ret.Data(), ch.Data()) {
    396  			t.Fatal("expected to get the same data back, but got something else")
   397  		}
   398  
   399  		log.Trace("got back chunk", "chunk", ret)
   400  	}
   401  
   402  	// all surplus chunks should be missing
   403  	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
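        	// (illustration with made-up numbers: for capacity=500, n=2000 and
        	// roundTarget=300 this is 300 + ((1500/300)*300) = 1800, using integer division)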
   404  	if missing != expectMissing {
   405  		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
   406  	}
   407  
   408  	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
   409  }
   410  
   411  // TestLDBStoreAddRemove tests that we can put and then delete a given chunk
   412  func TestLDBStoreAddRemove(t *testing.T) {
   413  	ldb, cleanup := newLDBStore(t)
   414  	ldb.setCapacity(200)
   415  	defer cleanup()
   416  
   417  	n := 100
   418  	chunks, err := mputRandomChunks(ldb, n)
   419  	if err != nil {
    420  		t.Fatal(err)
   421  	}
   422  
   423  	for i := 0; i < n; i++ {
   424  		// delete all even index chunks
   425  		if i%2 == 0 {
   426  			ldb.Delete(chunks[i].Address())
   427  		}
   428  	}
   429  
   430  	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
   431  
   432  	for i := 0; i < n; i++ {
   433  		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
   434  
   435  		if i%2 == 0 {
   436  			// expect even chunks to be missing
   437  			if err == nil {
   438  				t.Fatal("expected chunk to be missing, but got no error")
   439  			}
   440  		} else {
   441  			// expect odd chunks to be retrieved successfully
   442  			if err != nil {
   443  				t.Fatalf("expected no error, but got %s", err)
   444  			}
   445  
   446  			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
    447  				t.Fatal("expected to get the same data back, but got something else")
   448  			}
   449  		}
   450  	}
   451  }
   452  
   453  func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
   454  
   455  	params := strings.Split(t.Name(), "/")
   456  	capacity, err := strconv.Atoi(params[2])
   457  	if err != nil {
   458  		t.Fatal(err)
   459  	}
   460  	n, err := strconv.Atoi(params[3])
   461  	if err != nil {
   462  		t.Fatal(err)
   463  	}
   464  
   465  	ldb, cleanup := newLDBStore(t)
   466  	defer cleanup()
   467  	ldb.setCapacity(uint64(capacity))
   468  
    469  	// generate n random chunks, then put them all (n exceeds the capacity)
   470  	chunks := make([]Chunk, n)
   471  	for i := 0; i < n; i++ {
   472  		c := GenerateRandomChunk(ch.DefaultSize)
   473  		chunks[i] = c
   474  		log.Trace("generate random chunk", "idx", i, "chunk", c)
   475  	}
   476  
   477  	for i := 0; i < n; i++ {
   478  		err := ldb.Put(context.TODO(), chunks[i])
   479  		if err != nil {
   480  			t.Fatal(err)
   481  		}
   482  	}
   483  
   484  	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
   485  	defer cancel()
   486  	waitGc(ctx, ldb)
   487  
   488  	// delete all chunks
   489  	// (only count the ones actually deleted, the rest will have been gc'd)
   490  	deletes := 0
   491  	for i := 0; i < n; i++ {
   492  		if ldb.Delete(chunks[i].Address()) == nil {
   493  			deletes++
   494  		}
   495  	}
   496  
   497  	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
   498  
   499  	if ldb.entryCnt != 0 {
   500  		t.Fatalf("ldb.entrCnt expected 0 got %v", ldb.entryCnt)
   501  	}
   502  
    503  	// only the puts increment accessCnt (the manual deletes do not add to it), so after n puts it should equal n
   504  	expAccessCnt := uint64(n)
   505  	if ldb.accessCnt != expAccessCnt {
   506  		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
   507  	}
   508  
   509  	// retrieve the gc round target count for the db capacity
   510  	ldb.startGC(capacity)
   511  	roundTarget := ldb.gc.target
   512  
   513  	remaining := n
   514  	var puts int
   515  	for remaining > 0 {
   516  		var putCount int
   517  		if remaining < roundTarget {
   518  			putCount = remaining
   519  		} else {
   520  			putCount = roundTarget
   521  		}
   522  		remaining -= putCount
   523  		for putCount > 0 {
   524  			ldb.Put(context.TODO(), chunks[puts])
   525  			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
   526  			puts++
   527  			putCount--
   528  		}
   529  
   530  		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
   531  		defer cancel()
   532  		waitGc(ctx, ldb)
   533  	}
   534  
   535  	// expect first surplus chunks to be missing, because they have the smallest access value
   536  	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
   537  	for i := 0; i < expectMissing; i++ {
   538  		_, err := ldb.Get(context.TODO(), chunks[i].Address())
   539  		if err == nil {
   540  			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
   541  		}
   542  	}
   543  
   544  	// expect last chunks to be present, as they have the largest access value
   545  	for i := expectMissing; i < n; i++ {
   546  		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
   547  		if err != nil {
   548  			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
   549  		}
   550  		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
    551  			t.Fatal("expected to get the same data back, but got something else")
   552  		}
   553  	}
   554  }
   555  
   556  // TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
   557  func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
   558  
   559  	capacity := defaultMaxGCRound / 100 * 2
   560  	n := capacity - 1
   561  
   562  	ldb, cleanup := newLDBStore(t)
   563  	ldb.setCapacity(uint64(capacity))
   564  	defer cleanup()
   565  
   566  	chunks, err := mputRandomChunks(ldb, n)
   567  	if err != nil {
   568  		t.Fatal(err.Error())
   569  	}
   570  	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
   571  
   572  	// set first added capacity/2 chunks to highest accesscount
   573  	for i := 0; i < capacity/2; i++ {
   574  		_, err := ldb.Get(context.TODO(), chunks[i].Address())
   575  		if err != nil {
   576  			t.Fatalf("fail add chunk #%d - %s: %v", i, chunks[i].Address(), err)
   577  		}
   578  	}
   579  	_, err = mputRandomChunks(ldb, 2)
   580  	if err != nil {
   581  		t.Fatal(err.Error())
   582  	}
   583  
    584  	// wait for garbage collection to kick in and finish its round
   585  	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
   586  	defer cancel()
   587  	waitGc(ctx, ldb)
   588  
   589  	var missing int
   590  	for i, ch := range chunks[2 : capacity/2] {
   591  		ret, err := ldb.Get(context.TODO(), ch.Address())
   592  		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
   593  			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
   594  		}
   595  
   596  		if !bytes.Equal(ret.Data(), ch.Data()) {
    597  			t.Fatal("expected to get the same data back, but got something else")
   598  		}
   599  		log.Trace("got back chunk", "chunk", ret)
   600  	}
   601  
   602  	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
   603  }
   604  
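        // TestCleanIndex checks that CleanGCIndex prunes index entries whose chunk data
        // is gone, repairs corrupted gc index entries, and leaves the entry and per-bin
        // counters consistent.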
   605  func TestCleanIndex(t *testing.T) {
   606  	capacity := 5000
   607  	n := 3
   608  
   609  	ldb, cleanup := newLDBStore(t)
   610  	ldb.setCapacity(uint64(capacity))
   611  	defer cleanup()
   612  
   613  	chunks, err := mputRandomChunks(ldb, n)
   614  	if err != nil {
   615  		t.Fatal(err)
   616  	}
   617  
   618  	// remove the data of the first chunk
   619  	po := ldb.po(chunks[0].Address()[:])
   620  	dataKey := make([]byte, 10)
   621  	dataKey[0] = keyData
   622  	dataKey[1] = byte(po)
    623  	// dataKey[2:10] holds the 8-byte storage index; the first chunk has storage index 0, so it stays zeroed
   624  	if _, err := ldb.db.Get(dataKey); err != nil {
   625  		t.Fatal(err)
   626  	}
   627  	if err := ldb.db.Delete(dataKey); err != nil {
   628  		t.Fatal(err)
   629  	}
   630  
   631  	// remove the gc index row for the first chunk
   632  	gcFirstCorrectKey := make([]byte, 9)
   633  	gcFirstCorrectKey[0] = keyGCIdx
   634  	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
   635  		t.Fatal(err)
   636  	}
   637  
   638  	// warp the gc data of the second chunk
   639  	// this data should be correct again after the clean
   640  	gcSecondCorrectKey := make([]byte, 9)
   641  	gcSecondCorrectKey[0] = keyGCIdx
   642  	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
   643  	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
   644  	if err != nil {
   645  		t.Fatal(err)
   646  	}
   647  	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
   648  	copy(warpedGCVal[1:], gcSecondCorrectVal)
   649  	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
   650  		t.Fatal(err)
   651  	}
   652  	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
   653  		t.Fatal(err)
   654  	}
   655  
   656  	if err := ldb.CleanGCIndex(); err != nil {
   657  		t.Fatal(err)
   658  	}
   659  
   660  	// the index without corresponding data should have been deleted
   661  	idxKey := make([]byte, 33)
   662  	idxKey[0] = keyIndex
   663  	copy(idxKey[1:], chunks[0].Address())
   664  	if _, err := ldb.db.Get(idxKey); err == nil {
   665  		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
   666  	}
   667  
   668  	// the two other indices should be present
   669  	copy(idxKey[1:], chunks[1].Address())
   670  	if _, err := ldb.db.Get(idxKey); err != nil {
   671  		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
   672  	}
   673  
   674  	copy(idxKey[1:], chunks[2].Address())
   675  	if _, err := ldb.db.Get(idxKey); err != nil {
   676  		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
   677  	}
   678  
   679  	// first gc index should still be gone
   680  	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
   681  		t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
   682  	}
   683  
   684  	// second gc index should still be fixed
   685  	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
   686  		t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
   687  	}
   688  
   689  	// third gc index should be unchanged
   690  	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
   691  	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
   692  		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
   693  	}
   694  
   695  	c, err := ldb.db.Get(keyEntryCnt)
   696  	if err != nil {
   697  		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
   698  	}
   699  
    700  	// entrycount should now be one less than the 3 chunks put, since chunk 0 was pruned
    701  	entryCount := binary.BigEndian.Uint64(c)
    702  	if entryCount != 2 {
    703  		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
   704  	}
   705  
    706  	// the remaining two chunks might happen to fall into the same po bin:
    707  	// if so, that bin counter will now be 2 (the highest added storage index);
    708  	// if not, the two bin counters will sum to 3
   709  	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
   710  	if poBins[0] == poBins[1] {
   711  		poBins = poBins[:1]
   712  	}
   713  
   714  	var binTotal uint64
   715  	var currentBin [2]byte
   716  	currentBin[0] = keyDistanceCnt
   717  	if len(poBins) == 1 {
   718  		currentBin[1] = poBins[0]
   719  		c, err := ldb.db.Get(currentBin[:])
   720  		if err != nil {
   721  			t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
   722  		}
   723  		binCount := binary.BigEndian.Uint64(c)
   724  		if binCount != 2 {
   725  			t.Fatalf("expected entrycnt to be 2, was %d", binCount)
   726  		}
   727  	} else {
   728  		for _, bin := range poBins {
   729  			currentBin[1] = bin
   730  			c, err := ldb.db.Get(currentBin[:])
   731  			if err != nil {
   732  				t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
   733  			}
   734  			binCount := binary.BigEndian.Uint64(c)
   735  			binTotal += binCount
   736  
   737  		}
   738  		if binTotal != 3 {
   739  			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
   740  		}
   741  	}
   742  
   743  	// check that the iterator quits properly
   744  	chunks, err = mputRandomChunks(ldb, 4100)
   745  	if err != nil {
   746  		t.Fatal(err)
   747  	}
   748  
   749  	po = ldb.po(chunks[4099].Address()[:])
   750  	dataKey = make([]byte, 10)
   751  	dataKey[0] = keyData
   752  	dataKey[1] = byte(po)
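        	// the last of the 4100 new chunks follows the 3 chunks put earlier, so its
        	// storage index is 4099+3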
   753  	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
   754  	if _, err := ldb.db.Get(dataKey); err != nil {
   755  		t.Fatal(err)
   756  	}
   757  	if err := ldb.db.Delete(dataKey); err != nil {
   758  		t.Fatal(err)
   759  	}
   760  
   761  	if err := ldb.CleanGCIndex(); err != nil {
   762  		t.Fatal(err)
   763  	}
   764  
    765  	// entrycount should now be one less than the 4100 newly added chunks, plus the 2 surviving earlier ones
   766  	c, err = ldb.db.Get(keyEntryCnt)
   767  	if err != nil {
   768  		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
   769  	}
   770  	entryCount = binary.BigEndian.Uint64(c)
    771  	if entryCount != 4099+2 {
    772  		t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
   773  	}
   774  }
   775  
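        // waitGc blocks until any in-flight garbage collection round has finished, by
        // taking the gc run token and immediately handing it back.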
   776  func waitGc(ctx context.Context, ldb *LDBStore) {
   777  	<-ldb.gc.runC
   778  	ldb.gc.runC <- struct{}{}
   779  }