github.com/susy-go/susy-graviton@v0.0.0-20190614130430-36cddae42305/swarm/storage/ldbstore_test.go

// Copyleft 2016 The susy-graviton Authors
// This file is part of the susy-graviton library.
//
// The susy-graviton library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The susy-graviton library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the susy-graviton library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/susy-go/susy-graviton/common"
	ch "github.com/susy-go/susy-graviton/swarm/chunk"
	"github.com/susy-go/susy-graviton/swarm/log"
	"github.com/susy-go/susy-graviton/swarm/storage/mock/mem"
	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
	*LDBStore
	dir string
}

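// newTestDbStore creates an LDBStore rooted in a fresh temporary directory,
// optionally backed by an in-memory mock global store. It returns the store,
// a cleanup function that closes the store and removes the directory, and any
// initialization error. The trusted parameter is currently unused. The tests
// in this file use it roughly as follows:
//
//	db, cleanup, err := newTestDbStore(false, false)
//	defer cleanup()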
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

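// testPoFunc is the proximity order function used by the test stores: it
// measures the proximity of an address to the all-zero base key.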
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

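// testDbStoreRandom exercises put/get of n random chunks on a fresh
// (optionally mocked) LDBStore via the shared testStoreRandom helper.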
func testDbStoreRandom(n int, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, t)
}

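// testDbStoreCorrect runs the shared testStoreCorrect checks for n chunks
// against a fresh (optionally mocked) LDBStore.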
func testDbStoreCorrect(n int, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, t)
}

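// TestMarkAccessed stores a single chunk and verifies that MarkAccessed,
// followed by writeCurrentBatch, bumps the chunk's access counter in the
// index from 0 to 1.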
func TestMarkAccessed(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	h := GenerateRandomChunk(ch.DefaultSize)

	if err := db.Put(context.Background(), h); err != nil {
		t.Fatalf("dbStore.Put failed: %v", err)
	}

	var index dpaDBIndex
	addr := h.Address()
	idxk := getIndexKey(addr)

	idata, err := db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 0 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
	}

	db.MarkAccessed(addr)
	db.writeCurrentBatch()

	idata, err = db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 1 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
	}
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, true, t)
}

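// testDbStoreNotFound checks that requesting the zero address from an empty
// store returns ErrChunkNotFound rather than some other error.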
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}
func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

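// testIterator puts a batch of random chunks and then walks SyncIterator over
// every proximity order bin, checking that the keys reported by the iterator
// match the keys that were stored.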
func testIterator(t *testing.T, mock bool) {
	var chunkcount int = 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeys_results := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)

	for i = 0; i < len(chunks); i++ {
		chunkkeys[i] = chunks[i].Address()
		err := db.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatalf("dbStore.Put failed: %v", err)
		}
	}

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeys_results[n] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}
func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

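// benchmarkDbStorePut benchmarks storing n chunks into a fresh (optionally
// mocked) LDBStore via the shared benchmarkStorePut helper.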
func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, n, b)
}

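// benchmarkDbStoreGet benchmarks retrieving n chunks from a fresh (optionally
// mocked) LDBStore via the shared benchmarkStoreGet helper.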
func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, b)
}

func BenchmarkDbStorePut_500(b *testing.B) {
	benchmarkDbStorePut(500, false, b)
}

func BenchmarkDbStoreGet_500(b *testing.B) {
	benchmarkDbStoreGet(500, false, b)
}

func BenchmarkMockDbStorePut_500(b *testing.B) {
	benchmarkDbStorePut(500, true, b)
}

func BenchmarkMockDbStoreGet_500(b *testing.B) {
	benchmarkDbStoreGet(500, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store and
// retrieve them, provided we stay below the capacity that would trigger garbage collection.
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than the LevelDB store's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store.
// It also tests that we can delete chunks and that we can trigger garbage collection.
func TestLDBStoreCollectGarbage(t *testing.T) {
	// below max round
	initialCap := defaultMaxGCRound / 100
	cap := initialCap / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = initialCap
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = initialCap + 500
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
}

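// testLDBStoreCollectGarbage is the subtest body for the "A" cases above. It
// parses the capacity and chunk count from the subtest name, puts chunks in
// batches no larger than one gc round target while waiting for gc in between,
// and then verifies that exactly the expected number of chunks has been
// evicted.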
func testLDBStoreCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// put the chunks in batches no larger than the gc round target, and wait for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount)
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// attempt gets on all put chunks
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}

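// testLDBStoreRemoveThenCollectGarbage is the subtest body for the "B" cases
// of TestLDBStoreCollectGarbage. It fills the store, deletes everything that
// survived garbage collection, then refills the store in gc-round-sized
// batches and checks that only the most recently accessed chunks remain.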
func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// generate and put n random chunks
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(ch.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	// delete all chunks
	// (only count the ones actually deleted, the rest will have been gc'd)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entryCnt expected 0 got %v", ldb.entryCnt)
	}

	// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			puts++
			putCount--
		}

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// expect the first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect the last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where the access count differs from the index count
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
	capacity := defaultMaxGCRound / 100 * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// set the first capacity/2 chunks added to the highest access count
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("fail get chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2)
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in on the responsible actor
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

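// TestCleanIndex exercises CleanGCIndex: it corrupts the store by removing the
// data and gc index entries of the first chunk and by warping the gc index
// value of the second chunk, runs CleanGCIndex, and then verifies that the
// orphaned index is pruned, the remaining indices and counters are repaired,
// and the cleanup iterator terminates correctly on a larger data set.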
func TestCleanIndex(t *testing.T) {
	capacity := 5000
	n := 3

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err)
	}

	// remove the data of the first chunk
	po := ldb.po(chunks[0].Address()[:])
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// dataKey[2:10]: the first chunk has storageIdx 0, so these bytes stay zero
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	// remove the gc index row for the first chunk
	gcFirstCorrectKey := make([]byte, 9)
	gcFirstCorrectKey[0] = keyGCIdx
	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
		t.Fatal(err)
	}

	// warp the gc data of the second chunk
	// this data should be correct again after the clean
	gcSecondCorrectKey := make([]byte, 9)
	gcSecondCorrectKey[0] = keyGCIdx
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
	if err != nil {
		t.Fatal(err)
	}
	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
	copy(warpedGCVal[1:], gcSecondCorrectVal)
	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the index without corresponding data should have been deleted
	idxKey := make([]byte, 33)
	idxKey[0] = keyIndex
	copy(idxKey[1:], chunks[0].Address())
	if _, err := ldb.db.Get(idxKey); err == nil {
		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
	}

	// the two other indices should be present
	copy(idxKey[1:], chunks[1].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
	}

	copy(idxKey[1:], chunks[2].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
	}

	// first gc index should still be gone
	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
		t.Fatalf("expected gc 0 idx to be pruned: %v", gcFirstCorrectKey)
	}

	// second gc index should still be fixed
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 1 idx to be present: %v", gcSecondCorrectKey)
	}

	// third gc index should be unchanged
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", gcSecondCorrectKey)
	}

	c, err := ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entrycnt key to be present: %v", err)
	}

	// entrycount should now be one less
	entryCount := binary.BigEndian.Uint64(c)
	if entryCount != 2 {
		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
	}

	// the chunks might accidentally be in the same bin
	// if so, that bin counter will now be 2 - the highest added index
	// if not, the total of them will be 3
	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
	if poBins[0] == poBins[1] {
		poBins = poBins[:1]
	}

	var binTotal uint64
	var currentBin [2]byte
	currentBin[0] = keyDistanceCnt
	if len(poBins) == 1 {
		currentBin[1] = poBins[0]
		c, err := ldb.db.Get(currentBin[:])
		if err != nil {
			t.Fatalf("expected bin count key to be present: %v", err)
		}
		binCount := binary.BigEndian.Uint64(c)
		if binCount != 2 {
			t.Fatalf("expected bin count to be 2, was %d", binCount)
		}
	} else {
		for _, bin := range poBins {
			currentBin[1] = bin
			c, err := ldb.db.Get(currentBin[:])
			if err != nil {
				t.Fatalf("expected bin count key to be present: %v", err)
			}
			binCount := binary.BigEndian.Uint64(c)
			binTotal += binCount
		}
		if binTotal != 3 {
			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
		}
	}

	// check that the iterator quits properly
	chunks, err = mputRandomChunks(ldb, 4100)
	if err != nil {
		t.Fatal(err)
	}

	po = ldb.po(chunks[4099].Address()[:])
	dataKey = make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the entry count should now be one less than the total number of chunks stored (2 + 4100 - 1)
	c, err = ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entrycnt key to be present: %v", err)
	}
	entryCount = binary.BigEndian.Uint64(c)
	if entryCount != 4099+2 {
		t.Fatalf("expected entrycnt to be %v, was %v", 4099+2, entryCount)
	}
}

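// waitGc blocks until any garbage collection round in progress has released
// the ldb.gc.runC token, then immediately puts the token back so later rounds
// can run. The ctx parameter is currently unused.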
func waitGc(ctx context.Context, ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}