github.com/nitinawathare/ethereumassignment3@v0.0.0-20211021213010-f07344c2b868/go-ethereum/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/testutil"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

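// testDbStore wraps an LDBStore together with the temporary directory that
// backs it, so tests can remove all on-disk state when they are done.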
type testDbStore struct {
	*LDBStore
	dir string
}

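// newTestDbStore creates an LDBStore rooted in a fresh temporary directory.
// If mock is true, chunk data is delegated to an in-memory global mock store
// instead of being kept by LevelDB directly. The returned cleanup function
// closes the store and removes the directory, and is safe to call even when
// err is non-nil. Note that the trusted parameter is currently unused by this
// helper's body. A typical call looks like:
//
//	db, cleanup, err := newTestDbStore(false, false)
//	defer cleanup()
//	if err != nil {
//		t.Fatalf("init dbStore failed: %v", err)
//	}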
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

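// testPoFunc computes the proximity order of a chunk address relative to an
// all-zero base key; the tests use it in place of a node-specific base address.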
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

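// testDbStoreRandom stores n random chunks and checks that they can be
// retrieved again (delegating to testStoreRandom).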
func testDbStoreRandom(n int, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, t)
}

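// testDbStoreCorrect stores n random chunks and verifies that the retrieved
// data matches what was stored (delegating to testStoreCorrect).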
func testDbStoreCorrect(n int, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, t)
}

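// TestMarkAccessed verifies that MarkAccessed increments the access counter
// persisted in a chunk's index entry: it is 0 right after Put and 1 after one
// MarkAccessed call followed by a batch write.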
func TestMarkAccessed(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	h := GenerateRandomChunk(chunk.DefaultSize)

	if err := db.Put(context.Background(), h); err != nil {
		t.Fatalf("dbStore.Put failed: %v", err)
	}

	var index dpaDBIndex
	addr := h.Address()
	idxk := getIndexKey(addr)

	idata, err := db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 0 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
	}

	db.MarkAccessed(addr)
	db.writeCurrentBatch()

	idata, err = db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 1 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
	}
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, true, t)
}

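// testDbStoreNotFound checks that getting a key that was never stored returns
// ErrChunkNotFound rather than some other error.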
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}

func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

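// testIterator puts a batch of random chunks and then walks SyncIterator over
// every proximity order bucket, checking that each stored key is visited and
// reported back at its original position.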
func testIterator(t *testing.T, mock bool) {
	var i int
	var poc uint
	chunkcount := 32
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeysResults := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount)

	for i = 0; i < len(chunks); i++ {
		chunkkeys[i] = chunks[i].Address()
		err := db.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatalf("dbStore.Put failed: %v", err)
		}
	}

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeysResults[n] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeysResults[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeysResults[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}

func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

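// benchmarkDbStorePut and benchmarkDbStoreGet measure Put and Get throughput
// against a store created by newTestDbStore, with or without the mock backend.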
func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, n, b)
}

func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, b)
}

func BenchmarkDbStorePut_500(b *testing.B) {
	benchmarkDbStorePut(500, false, b)
}

func BenchmarkDbStoreGet_500(b *testing.B) {
	benchmarkDbStoreGet(500, false, b)
}

func BenchmarkMockDbStorePut_500(b *testing.B) {
	benchmarkDbStorePut(500, true, b)
}

func BenchmarkMockDbStoreGet_500(b *testing.B) {
	benchmarkDbStoreGet(500, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't hit the garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store.
// It also tests that we can delete chunks and that we can trigger garbage collection.
func TestLDBStoreCollectGarbage(t *testing.T) {
	// below max round
	initialCap := defaultMaxGCRound / 100
	cap := initialCap / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)

	if testutil.RaceEnabled {
		t.Skip("only the simplest case run as others are flaky with race")
		// Note: some tests fail consistently and even locally with `-race`
	}

	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = initialCap
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = initialCap + 500
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
}

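// testLDBStoreCollectGarbage parses the store capacity and the number of
// chunks to put from the subtest name (e.g. ".../A/50/200"), fills the store
// past capacity in gc-round-sized batches, and verifies that exactly the
// surplus chunks were evicted.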
func testLDBStoreCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// split the puts into gc-round-target-sized batches, and wait for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount)
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		ldb.lock.RLock()
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
		ldb.lock.RUnlock()

		waitGc(ldb)
	}

	// attempt gets on all put chunks
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
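	// Rationale for the expected count below: gc evicts roundTarget chunks per
	// round, and the puts above were issued in roundTarget-sized batches with
	// a gc wait in between, so going from capacity to n stored chunks should
	// trigger (n-capacity)/roundTarget full rounds, plus one more round for
	// the final batch that pushes the store over capacity again.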
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err)
	}

	// delete all even-index chunks
	for i := 0; i < n; i++ {
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}

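// testLDBStoreRemoveThenCollectGarbage fills the store, deletes everything,
// then fills it past capacity again and checks that gc evicted the chunks with
// the smallest access values (the earliest puts) first.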
func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	t.Skip("flaky with -race flag")

	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// put capacity count number of chunks
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(chunk.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	waitGc(ldb)

	// delete all chunks
	// (only count the ones actually deleted, the rest will have been gc'd)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entryCnt expected 0, got %v", ldb.entryCnt)
	}

	// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v, got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			ldb.lock.RLock()
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			ldb.lock.RUnlock()
			puts++
			putCount--
		}

		waitGc(ldb)
	}

	// expect the first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect the last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
	capacity := defaultMaxGCRound / 100 * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// raise the access count of the first capacity/2 chunks by getting them
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("fail add chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2)
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in on the responsible actor
	waitGc(ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

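// TestCleanIndex deliberately corrupts the store's bookkeeping (removes a data
// entry, removes one gc index row, and garbles another) and then verifies that
// CleanGCIndex restores consistent index rows, gc rows, entry count, and
// per-bin counts.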
func TestCleanIndex(t *testing.T) {
	capacity := 5000
	n := 3

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err)
	}

	// remove the data of the first chunk
	po := ldb.po(chunks[0].Address()[:])
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// dataKey[2:10] is the big-endian storage index; the first chunk stored
	// has storage index 0, so those bytes stay zeroed
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	// remove the gc index row for the first chunk
	gcFirstCorrectKey := make([]byte, 9)
	gcFirstCorrectKey[0] = keyGCIdx
	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
		t.Fatal(err)
	}

	// warp the gc data of the second chunk
	// this data should be correct again after the clean
	gcSecondCorrectKey := make([]byte, 9)
	gcSecondCorrectKey[0] = keyGCIdx
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
	if err != nil {
		t.Fatal(err)
	}
	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
	copy(warpedGCVal[1:], gcSecondCorrectVal)
	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the index without corresponding data should have been deleted
	idxKey := make([]byte, 33)
	idxKey[0] = keyIndex
	copy(idxKey[1:], chunks[0].Address())
	if _, err := ldb.db.Get(idxKey); err == nil {
		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
	}

	// the two other indices should be present
	copy(idxKey[1:], chunks[1].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
	}

	copy(idxKey[1:], chunks[2].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
	}

	// first gc index should still be gone
	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
		t.Fatalf("expected gc 0 idx to be pruned: %v", gcFirstCorrectKey)
	}

	// second gc index should have been fixed
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 1 idx to be present: %v", gcSecondCorrectKey)
	}

	// third gc index should be unchanged
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", gcSecondCorrectKey)
	}

	c, err := ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entry count to be present: %v", err)
	}

	// the entry count should now be one less
	entryCount := binary.BigEndian.Uint64(c)
	if entryCount != 2 {
		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
	}

	// the chunks might accidentally be in the same bin
	// if so, that bin counter will now be 2 - the highest added index.
	// if not, the total of them will be 3
	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
	if poBins[0] == poBins[1] {
		poBins = poBins[:1]
	}

	var binTotal uint64
	var currentBin [2]byte
	currentBin[0] = keyDistanceCnt
	if len(poBins) == 1 {
		currentBin[1] = poBins[0]
		c, err := ldb.db.Get(currentBin[:])
		if err != nil {
			t.Fatalf("expected bin count for po %d to be present: %v", poBins[0], err)
		}
		binCount := binary.BigEndian.Uint64(c)
		if binCount != 2 {
			t.Fatalf("expected bin count to be 2, was %d", binCount)
		}
	} else {
		for _, bin := range poBins {
			currentBin[1] = bin
			c, err := ldb.db.Get(currentBin[:])
			if err != nil {
				t.Fatalf("expected bin count for po %d to be present: %v", bin, err)
			}
			binCount := binary.BigEndian.Uint64(c)
			binTotal += binCount
		}
		if binTotal != 3 {
			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
		}
	}

	// check that the iterator quits properly
	chunks, err = mputRandomChunks(ldb, 4100)
	if err != nil {
		t.Fatal(err)
	}

	po = ldb.po(chunks[4099].Address()[:])
	dataKey = make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// the last of the 4100 new chunks has storage index 4099+3
	// (3 chunks were already stored before this batch)
	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the entry count should now be one less than the number of added chunks
	c, err = ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entry count to be present: %v", err)
	}
	entryCount = binary.BigEndian.Uint64(c)
	if entryCount != 4099+2 {
		t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
	}
}

// Note: waitGc does not guarantee that we wait 1 GC round; it only
// guarantees that if the GC is running we wait for that run to finish.
// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
func waitGc(ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}