github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	ch "github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

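// testDbStore couples an LDBStore with the temporary directory that backs it,
// so tests can remove the on-disk state when they are done.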
type testDbStore struct {
	*LDBStore
	dir string
}

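// newTestDbStore creates an LDBStore in a fresh temporary directory, optionally
// backed by an in-memory mock global store, and returns it together with a
// cleanup function that closes the store and removes the directory.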
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

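// testPoFunc computes the proximity order of an address relative to the all-zero base key.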
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

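// close shuts down the store and removes its temporary directory.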
func (db *testDbStore) close() {
	db.Close()
	err := os.RemoveAll(db.dir)
	if err != nil {
		panic(err)
	}
}

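// testDbStoreRandom runs the generic random put/get test (testStoreRandom) against a fresh test store.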
func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, chunksize, t)
}

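// testDbStoreCorrect runs the generic correctness test (testStoreCorrect) against a fresh test store.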
func testDbStoreCorrect(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, chunksize, t)
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 0, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 4096, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, 0, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, 4096, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 0, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 4096, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, 0, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, 4096, true, t)
}

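// testDbStoreNotFound checks that getting a non-existent chunk returns ErrChunkNotFound.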
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}
func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

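// testIterator puts a batch of random chunks and verifies that SyncIterator,
// run over every proximity order bin, returns each stored key at its storage index.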
func testIterator(t *testing.T, mock bool) {
	var chunkcount int = 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeys_results := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)

	for i = 0; i < len(chunks); i++ {
		chunkkeys[i] = chunks[i].Address()
		err := db.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatalf("dbStore.Put failed: %v", err)
		}
	}

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeys_results[n] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
		}
	}

}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}
func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

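// benchmarkDbStorePut benchmarks storing n chunks of the given size in a fresh test store.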
func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, n, chunksize, b)
}

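// benchmarkDbStoreGet benchmarks retrieving n chunks of the given size from a fresh test store.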
func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, chunksize, b)
}

func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store and
// retrieve them, provided we do not trigger garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store
// Also tests that we can delete chunks and that we can trigger garbage collection
func TestLDBStoreCollectGarbage(t *testing.T) {

	// below max round
	cap := defaultMaxGCRound / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = defaultMaxGCRound
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = defaultMaxGCRound * 1.1
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

}

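// testLDBStoreCollectGarbage is the body of the "A" subtests of TestLDBStoreCollectGarbage;
// it reads the store capacity and the number of chunks to put from the subtest name.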
func testLDBStoreCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// put the chunks in batches no larger than the gc round target, waiting for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount, int64(ch.DefaultSize))
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// attempt gets on all put chunks
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}

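// testLDBStoreRemoveThenCollectGarbage is the body of the "B" subtests of TestLDBStoreCollectGarbage;
// it fills the store, deletes all chunks manually, refills it, and verifies that garbage
// collection evicts the chunks with the lowest access counts.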
func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {

	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// generate and put n chunks
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(ch.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	// delete all chunks
	// (only count the ones actually deleted, the rest will have been gc'd)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entryCnt expected 0 got %v", ldb.entryCnt)
	}

	// accessCnt should equal the number of puts made so far
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			puts++
			putCount--
		}

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// expect first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {

	capacity := defaultMaxGCRound * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// set first added capacity/2 chunks to highest accesscount
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("fail get chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in on the responsible actor
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

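// waitGc takes the gc run channel token and immediately puts it back, so it
// returns only once any garbage collection round currently holding the token has finished.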
func waitGc(ctx context.Context, ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}