github.com/gobitfly/go-ethereum@v1.8.12/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
	*LDBStore
	dir string
}
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		// close the store if it was opened before removing its directory
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic("db cleanup failed")
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

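// testPoFunc is the proximity order function used by the test stores: it
// returns the proximity of address k to an all-zero base key.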
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey[:], k[:]))
}

func (db *testDbStore) close() {
	db.Close()
	err := os.RemoveAll(db.dir)
	if err != nil {
		panic(err)
	}
}

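// testDbStoreRandom exercises a fresh test store through the shared
// testStoreRandom helper; mock selects the mock-backed variant of the store.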
func testDbStoreRandom(n int, processors int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, processors, n, chunksize, t)
}

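// testDbStoreCorrect does the same through the shared testStoreCorrect helper.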
func testDbStoreCorrect(n int, processors int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, processors, n, chunksize, t)
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 1, 0, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 1, 4096, false, t)
}

func TestDbStoreRandom_1_5k(t *testing.T) {
	testDbStoreRandom(1, 5000, 0, false, t)
}

func TestDbStoreRandom_8_5k(t *testing.T) {
	testDbStoreRandom(8, 5000, 0, false, t)
}

func TestDbStoreCorrect_1_5k(t *testing.T) {
	testDbStoreCorrect(1, 5000, 4096, false, t)
}

func TestDbStoreCorrect_8_5k(t *testing.T) {
	testDbStoreCorrect(8, 5000, 4096, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 1, 0, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 1, 4096, true, t)
}

func TestMockDbStoreRandom_1_5k(t *testing.T) {
	testDbStoreRandom(1, 5000, 0, true, t)
}

func TestMockDbStoreRandom_8_5k(t *testing.T) {
	testDbStoreRandom(8, 5000, 0, true, t)
}

func TestMockDbStoreCorrect_1_5k(t *testing.T) {
	testDbStoreCorrect(1, 5000, 4096, true, t)
}

func TestMockDbStoreCorrect_8_5k(t *testing.T) {
	testDbStoreCorrect(8, 5000, 4096, true, t)
}

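// testDbStoreNotFound checks that a Get on an empty store returns
// ErrChunkNotFound rather than some other error.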
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}
func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

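// testIterator stores a batch of random chunks, then walks every proximity
// order bin with SyncIterator and checks that the keys reported by the
// iterator match the keys that were put.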
func testIterator(t *testing.T, mock bool) {
	var chunkcount int = 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeys_results := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(DefaultChunkSize, chunkcount)

	wg := &sync.WaitGroup{}
	wg.Add(len(chunks))
	for i = 0; i < len(chunks); i++ {
		db.Put(chunks[i])
		chunkkeys[i] = chunks[i].Addr
		j := i
		go func() {
			defer wg.Done()
			<-chunks[j].dbStoredC
		}()
	}

	//testSplit(m, l, 128, chunkkeys, t)

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	wg.Wait()
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeys_results[n-1] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}
func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

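// benchmarkDbStorePut and benchmarkDbStoreGet measure put and get performance
// against a fresh test store through the shared benchmark helpers.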
func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, processors, n, chunksize, b)
}

func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, processors, n, chunksize, b)
}

func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't trigger garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		go ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(chunks[i].Addr)
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.SData, chunks[i].SData) {
			t.Fatal("expected to get the same data back, but got smth else")
		}

		log.Info("got back chunk", "chunk", ret)
	}

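	// the counters apparently start at 1: n puts leave entryCnt at n+1, and
	// n puts followed by n gets leave accessCnt at 2n+1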
	if ldb.entryCnt != uint64(n+1) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n+1, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n+1) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n+1, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than the store's capacity, and
// retrieve only some of them, because garbage collection must have cleared the rest
func TestLDBStoreCollectGarbage(t *testing.T) {
	capacity := 500
	n := 2000

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// wait for garbage collection to kick in on the responsible actor
	time.Sleep(5 * time.Second)

	var missing int
	for i := 0; i < n; i++ {
		ret, err := ldb.Get(chunks[i].Addr)
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.SData, chunks[i].SData) {
			t.Fatal("expected to get the same data back, but got smth else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

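	// with n chunks stored in a store capped at capacity entries, garbage
	// collection must have evicted at least n-capacity of them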
	if missing < n-capacity {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", n-capacity, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		go ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

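	// remove chunks directly through the store internals: look up the index
	// entry for the address with tryAccessIdx, then remove it with delete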
	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {

			key := chunks[i].Addr
			ikey := getIndexKey(key)

			var indx dpaDBIndex
			ldb.tryAccessIdx(ikey, &indx)

			ldb.delete(indx.Idx, ikey, ldb.po(key))
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(chunks[i].Addr)

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil || ret != nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.SData, chunks[i].SData) {
				t.Fatal("expected to get the same data back, but got smth else")
			}
		}
	}
}

// TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection
func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	capacity := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))

	n := 7

	chunks := []*Chunk{}
	for i := 0; i < capacity; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	// delete all chunks
	for i := 0; i < n; i++ {
		key := chunks[i].Addr
		ikey := getIndexKey(key)

		var indx dpaDBIndex
		ldb.tryAccessIdx(ikey, &indx)

		ldb.delete(indx.Idx, ikey, ldb.po(key))
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	cleanup()

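	// start over with a fresh store of the same capacity and fill it to
	// capacity; garbage collection is then expected to evict the least
	// recently accessed chunk (the first) while keeping the last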
	ldb, cleanup = newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	n = 10

	for i := 0; i < n; i++ {
		ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	// expect the first chunk to be missing, because it has the smallest access value
	idx := 0
	ret, err := ldb.Get(chunks[idx].Addr)
	if err == nil || ret != nil {
		t.Fatal("expected first chunk to be missing, but got no error")
	}

	// expect the last chunk to be present, as it has the largest access value
	idx = 9
	ret, err = ldb.Get(chunks[idx].Addr)
	if err != nil {
		t.Fatalf("expected no error, but got %s", err)
	}

	if !bytes.Equal(ret.SData, chunks[idx].SData) {
		t.Fatal("expected to get the same data back, but got smth else")
	}
}