github.com/divan/go-ethereum@v1.8.14-0.20180820134928-1de9ada4016d/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

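// testDbStore wraps an LDBStore together with the temporary directory that
// backs it, so tests can remove the directory when they are done.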
type testDbStore struct {
	*LDBStore
	dir string
}

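// newTestDbStore creates an LDBStore in a fresh temporary directory. If mock
// is true the store is backed by an in-memory global mock store. It returns
// the store, a cleanup function that closes the store and removes the
// directory, and any initialization error.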
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

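// testPoFunc returns the proximity order of an address relative to an
// all-zero base key.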
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey[:], k[:]))
}

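// close closes the underlying store and removes its temporary directory.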
func (db *testDbStore) close() {
	db.Close()
	err := os.RemoveAll(db.dir)
	if err != nil {
		panic(err)
	}
}

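// testDbStoreRandom runs the shared random store/retrieve test against a
// freshly created test store.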
func testDbStoreRandom(n int, processors int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, processors, n, chunksize, t)
}

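// testDbStoreCorrect runs the shared store correctness test against a
// freshly created test store.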
func testDbStoreCorrect(n int, processors int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, processors, n, chunksize, t)
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 1, 0, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 1, 4096, false, t)
}

func TestDbStoreRandom_1_5k(t *testing.T) {
	testDbStoreRandom(1, 5000, 0, false, t)
}

func TestDbStoreRandom_8_5k(t *testing.T) {
	testDbStoreRandom(8, 5000, 0, false, t)
}

func TestDbStoreCorrect_1_5k(t *testing.T) {
	testDbStoreCorrect(1, 5000, 4096, false, t)
}

func TestDbStoreCorrect_8_5k(t *testing.T) {
	testDbStoreCorrect(8, 5000, 4096, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 1, 0, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 1, 4096, true, t)
}

func TestMockDbStoreRandom_1_5k(t *testing.T) {
	testDbStoreRandom(1, 5000, 0, true, t)
}

func TestMockDbStoreRandom_8_5k(t *testing.T) {
	testDbStoreRandom(8, 5000, 0, true, t)
}

func TestMockDbStoreCorrect_1_5k(t *testing.T) {
	testDbStoreCorrect(1, 5000, 4096, true, t)
}

func TestMockDbStoreCorrect_8_5k(t *testing.T) {
	testDbStoreCorrect(8, 5000, 4096, true, t)
}

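// testDbStoreNotFound verifies that requesting a chunk that was never stored
// returns ErrChunkNotFound.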
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}
func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

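// testIterator stores a batch of random chunks and verifies that
// SyncIterator reports every stored key across all proximity order bins.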
func testIterator(t *testing.T, mock bool) {
	var chunkcount int = 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeys_results := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount)

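	// store the chunks, recording their keys, and mark the wait group done
	// once each chunk signals that it has been written to the store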
	wg := &sync.WaitGroup{}
	wg.Add(len(chunks))
	for i = 0; i < len(chunks); i++ {
		db.Put(context.TODO(), chunks[i])
		chunkkeys[i] = chunks[i].Addr
		j := i
		go func() {
			defer wg.Done()
			<-chunks[j].dbStoredC
		}()
	}

	//testSplit(m, l, 128, chunkkeys, t)

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	wg.Wait()
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeys_results[n-1] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
		}
	}

}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}
func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

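// benchmarkDbStorePut benchmarks putting chunks into a freshly created test store.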
func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, processors, n, chunksize, b)
}

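// benchmarkDbStoreGet benchmarks retrieving chunks from a freshly created test store.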
func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, processors, n, chunksize, b)
}

func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't hit the garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(chunk.DefaultSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		go ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Addr)
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.SData, chunks[i].SData) {
			t.Fatal("expected to get the same data back, but got smth else")
		}

		log.Info("got back chunk", "chunk", ret)
	}

	if ldb.entryCnt != uint64(n+1) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n+1, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n+1) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n+1, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
// retrieve only some of them, because garbage collection must have cleared some of them
func TestLDBStoreCollectGarbage(t *testing.T) {
	capacity := 500
	n := 2000

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(chunk.DefaultSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// wait for garbage collection to kick in on the responsible actor
	time.Sleep(5 * time.Second)

	var missing int
	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Addr)
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.SData, chunks[i].SData) {
			t.Fatal("expected to get the same data back, but got smth else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	if missing < n-capacity {
		t.Fatalf("gc failure: expected to miss at least %v chunks, but only %v are actually missing", n-capacity, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(chunk.DefaultSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		go ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {

			key := chunks[i].Addr
			ikey := getIndexKey(key)

			var indx dpaDBIndex
			ldb.tryAccessIdx(ikey, &indx)

			ldb.delete(indx.Idx, ikey, ldb.po(key))
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Addr)

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil || ret != nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.SData, chunks[i].SData) {
				t.Fatal("expected to get the same data back, but got smth else")
			}
		}
	}
}

// TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection
func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	capacity := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))

	n := 7

	chunks := []*Chunk{}
	for i := 0; i < capacity; i++ {
		c := GenerateRandomChunk(chunk.DefaultSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	// delete all chunks
	for i := 0; i < n; i++ {
		key := chunks[i].Addr
		ikey := getIndexKey(key)

		var indx dpaDBIndex
		ldb.tryAccessIdx(ikey, &indx)

		ldb.delete(indx.Idx, ikey, ldb.po(key))
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	cleanup()

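	// start over with a fresh store and this time fill it to capacity, so that garbage collection kicks in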
	ldb, cleanup = newLDBStore(t)
	ldb.setCapacity(uint64(capacity))

	n = 10

	for i := 0; i < n; i++ {
		ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	// expect the first chunk to be missing, because it has the smallest access value
	idx := 0
	ret, err := ldb.Get(context.TODO(), chunks[idx].Addr)
	if err == nil || ret != nil {
		t.Fatal("expected first chunk to be missing, but got no error")
	}

	// expect the last chunk to be present, as it has the largest access value
	idx = 9
	ret, err = ldb.Get(context.TODO(), chunks[idx].Addr)
	if err != nil {
		t.Fatalf("expected no error, but got %s", err)
	}

	if !bytes.Equal(ret.SData, chunks[idx].SData) {
		t.Fatal("expected to get the same data back, but got smth else")
	}
}