github.com/insight-chain/inb-go@v1.1.3-0.20191221022159-da049980ae38/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/insight-chain/inb-go/common"
	ch "github.com/insight-chain/inb-go/swarm/chunk"
	"github.com/insight-chain/inb-go/swarm/log"
	"github.com/insight-chain/inb-go/swarm/storage/mock/mem"
	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

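// testDbStore wraps an LDBStore together with the temporary directory backing
// it, so that tests can remove the directory once they are done.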
type testDbStore struct {
	*LDBStore
	dir string
}

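// newTestDbStore creates an LDBStore in a fresh temporary directory. If mock
// is true, the store is backed by an in-memory global mock store; the trusted
// argument is currently unused. The returned cleanup function closes the store
// and removes the temporary directory.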
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

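// testPoFunc computes the proximity order of a chunk address relative to an
// all-zero base key.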
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

func (db *testDbStore) close() {
	db.Close()
	err := os.RemoveAll(db.dir)
	if err != nil {
		panic(err)
	}
}

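// testDbStoreRandom runs the generic testStoreRandom round trip against a
// fresh test store with n chunks of the given size.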
func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, chunksize, t)
}

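// testDbStoreCorrect runs the generic testStoreCorrect checks against a fresh
// test store with n chunks of the given size.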
func testDbStoreCorrect(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, chunksize, t)
}

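// TestMarkAccessed checks that MarkAccessed increments the access counter
// stored in a chunk's index entry.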
func TestMarkAccessed(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	h := GenerateRandomChunk(ch.DefaultSize)

	if err := db.Put(context.Background(), h); err != nil {
		t.Fatalf("dbStore.Put failed: %v", err)
	}

	var index dpaDBIndex
	addr := h.Address()
	idxk := getIndexKey(addr)

	idata, err := db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 0 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
	}

	db.MarkAccessed(addr)
	db.writeCurrentBatch()

	idata, err = db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 1 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
	}

}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 0, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 4096, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, 0, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, 4096, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 0, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 4096, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, 0, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, 4096, true, t)
}

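// testDbStoreNotFound checks that requesting an unknown address returns
// ErrChunkNotFound.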
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}
func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

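// testIterator stores a batch of random chunks and verifies that SyncIterator,
// run over every proximity order bin, reports each stored key.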
func testIterator(t *testing.T, mock bool) {
	var chunkcount int = 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeys_results := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)

	for i = 0; i < len(chunks); i++ {
		chunkkeys[i] = chunks[i].Address()
		err := db.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatalf("dbStore.Put failed: %v", err)
		}
	}

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeys_results[n] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
		}
	}

}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}
func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

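// benchmarkDbStorePut measures storing n chunks of the given size in a fresh
// test store; the processors argument only distinguishes the benchmark
// variants and is not used by this helper.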
func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, n, chunksize, b)
}

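// benchmarkDbStoreGet measures retrieving n chunks of the given size from a
// fresh test store; as above, the processors argument is not used here.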
func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, chunksize, b)
}

func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store and
// retrieve them, as long as garbage collection is not triggered.
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store.
// It also tests that we can delete chunks and that we can trigger garbage collection.
func TestLDBStoreCollectGarbage(t *testing.T) {

	// below max round
	initialCap := defaultMaxGCRound / 100
	cap := initialCap / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = initialCap
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = initialCap + 500
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

}

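// testLDBStoreCollectGarbage reads the store capacity and chunk count from the
// subtest name, fills the store in batches of at most one gc round target, and
// verifies that exactly the surplus chunks have been garbage collected.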
func testLDBStoreCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// put the chunks in batches of at most the gc round target, waiting for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount, int64(ch.DefaultSize))
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// attempt gets on all put chunks
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}

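// testLDBStoreRemoveThenCollectGarbage puts a set of chunks, deletes them all,
// then refills the store past capacity and checks that garbage collection
// removes the chunks with the lowest access counts first.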
func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {

	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// generate n random chunks, then put them all
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(ch.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	// delete all chunks
	// (only count the ones actually deleted, the rest will have been gc'd)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entryCnt expected 0, got %v", ldb.entryCnt)
	}

	// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			puts++
			putCount--
		}

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// expect the first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect the last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {

	capacity := defaultMaxGCRound / 100 * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// set the first added capacity/2 chunks to the highest accesscount
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("failed to get chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in on the responsible actor
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("failed to find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

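// TestCleanIndex removes the data and gc entry of one chunk and garbles the gc
// entry of another, then verifies that CleanGCIndex restores a consistent
// index, entry count and per-bin counters.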
func TestCleanIndex(t *testing.T) {
	capacity := 5000
	n := 3

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, 4096)
	if err != nil {
		t.Fatal(err)
	}

	// remove the data of the first chunk
	po := ldb.po(chunks[0].Address()[:])
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// dataKey[2:10] holds the storage index; the first chunk has storage index 0, so leave it zeroed
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	// remove the gc index row for the first chunk
	gcFirstCorrectKey := make([]byte, 9)
	gcFirstCorrectKey[0] = keyGCIdx
	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
		t.Fatal(err)
	}

	// warp the gc data of the second chunk
	// this data should be correct again after the clean
	gcSecondCorrectKey := make([]byte, 9)
	gcSecondCorrectKey[0] = keyGCIdx
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
	if err != nil {
		t.Fatal(err)
	}
	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
	copy(warpedGCVal[1:], gcSecondCorrectVal)
	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the index without corresponding data should have been deleted
	idxKey := make([]byte, 33)
	idxKey[0] = keyIndex
	copy(idxKey[1:], chunks[0].Address())
	if _, err := ldb.db.Get(idxKey); err == nil {
		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
	}

	// the two other indices should be present
	copy(idxKey[1:], chunks[1].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
	}

	copy(idxKey[1:], chunks[2].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
	}

	// first gc index should still be gone
	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
		t.Fatalf("expected gc 0 idx to be pruned: %v", gcFirstCorrectKey)
	}

	// second gc index should still be fixed
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 1 idx to be present: %v", gcSecondCorrectKey)
	}

	// third gc index should be unchanged
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", gcSecondCorrectKey)
	}

	c, err := ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entry count to be present: %v", err)
	}

	// entrycount should now be one less
	entryCount := binary.BigEndian.Uint64(c)
	if entryCount != 2 {
		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
	}

	// the chunks might accidentally be in the same bin
	// if so that bin counter will now be 2 - the highest added index.
	// if not, the total of them will be 3
	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
	if poBins[0] == poBins[1] {
		poBins = poBins[:1]
	}

	var binTotal uint64
	var currentBin [2]byte
	currentBin[0] = keyDistanceCnt
	if len(poBins) == 1 {
		currentBin[1] = poBins[0]
		c, err := ldb.db.Get(currentBin[:])
		if err != nil {
			t.Fatalf("expected bin count to be present: %v", err)
		}
		binCount := binary.BigEndian.Uint64(c)
		if binCount != 2 {
			t.Fatalf("expected bin count to be 2, was %d", binCount)
		}
	} else {
		for _, bin := range poBins {
			currentBin[1] = bin
			c, err := ldb.db.Get(currentBin[:])
			if err != nil {
				t.Fatalf("expected bin count to be present: %v", err)
			}
			binCount := binary.BigEndian.Uint64(c)
			binTotal += binCount

		}
		if binTotal != 3 {
			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
		}
	}

	// check that the iterator quits properly
	chunks, err = mputRandomChunks(ldb, 4100, 4096)
	if err != nil {
		t.Fatal(err)
	}

	po = ldb.po(chunks[4099].Address()[:])
	dataKey = make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// entrycount should now be one less than the number of added chunks
	c, err = ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entry count to be present: %v", err)
	}
	entryCount = binary.BigEndian.Uint64(c)
	if entryCount != 4099+2 {
		t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
	}
}

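// waitGc blocks until any garbage collection round currently in progress has
// finished, by briefly taking the gc run token and handing it straight back.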
func waitGc(ctx context.Context, ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}
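
// The function below is a minimal usage sketch, not part of the original
// suite: it assumes only the helpers defined in this file and shows the
// put/get round trip that most of the tests above build on.
func TestDbStorePutGetExample(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	// store a single random chunk of the default size
	chunk := GenerateRandomChunk(ch.DefaultSize)
	if err := db.Put(context.TODO(), chunk); err != nil {
		t.Fatalf("dbStore.Put failed: %v", err)
	}

	// read it back and check that the payload is unchanged
	ret, err := db.Get(context.TODO(), chunk.Address())
	if err != nil {
		t.Fatalf("dbStore.Get failed: %v", err)
	}
	if !bytes.Equal(ret.Data(), chunk.Data()) {
		t.Fatal("expected to get the same data back, but got something else")
	}
}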