github.com/daragao/go-ethereum@v1.8.14-0.20180809141559-45eaef243198/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
	*LDBStore
	dir string
}

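// newTestDbStore creates an LDBStore backed by a fresh temporary directory,
// optionally wrapping a mock global store. It returns the store, a cleanup
// function that closes the store and removes the directory, and any error
// encountered during setup.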
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

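// testPoFunc computes the proximity order of an address relative to the all-zero base key.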
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey[:], k[:]))
}

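// close shuts down the underlying store and removes its temporary directory.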
func (db *testDbStore) close() {
	db.Close()
	err := os.RemoveAll(db.dir)
	if err != nil {
		panic(err)
	}
}

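// testDbStoreRandom sets up a test LDBStore (optionally mock-backed) and runs the
// shared testStoreRandom helper against it with the given parameters.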
func testDbStoreRandom(n int, processors int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, processors, n, chunksize, t)
}

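// testDbStoreCorrect sets up a test LDBStore (optionally mock-backed) and runs the
// shared testStoreCorrect helper against it with the given parameters.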
func testDbStoreCorrect(n int, processors int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, processors, n, chunksize, t)
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 1, 0, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 1, 4096, false, t)
}

func TestDbStoreRandom_1_5k(t *testing.T) {
	testDbStoreRandom(1, 5000, 0, false, t)
}

func TestDbStoreRandom_8_5k(t *testing.T) {
	testDbStoreRandom(8, 5000, 0, false, t)
}

func TestDbStoreCorrect_1_5k(t *testing.T) {
	testDbStoreCorrect(1, 5000, 4096, false, t)
}

func TestDbStoreCorrect_8_5k(t *testing.T) {
	testDbStoreCorrect(8, 5000, 4096, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 1, 0, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 1, 4096, true, t)
}

func TestMockDbStoreRandom_1_5k(t *testing.T) {
	testDbStoreRandom(1, 5000, 0, true, t)
}

func TestMockDbStoreRandom_8_5k(t *testing.T) {
	testDbStoreRandom(8, 5000, 0, true, t)
}

func TestMockDbStoreCorrect_1_5k(t *testing.T) {
	testDbStoreCorrect(1, 5000, 4096, true, t)
}

func TestMockDbStoreCorrect_8_5k(t *testing.T) {
	testDbStoreCorrect(8, 5000, 4096, true, t)
}

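// testDbStoreNotFound checks that requesting an address that was never stored
// returns ErrChunkNotFound.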
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}
func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

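// testIterator stores a batch of random chunks and verifies that SyncIterator
// reports every stored key across all proximity order bins.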
func testIterator(t *testing.T, mock bool) {
	chunkcount := 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeysResults := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(DefaultChunkSize, chunkcount)

	wg := &sync.WaitGroup{}
	wg.Add(len(chunks))
	for i = 0; i < len(chunks); i++ {
		db.Put(context.TODO(), chunks[i])
		chunkkeys[i] = chunks[i].Addr
		j := i
		go func() {
			defer wg.Done()
			<-chunks[j].dbStoredC
		}()
	}

	//testSplit(m, l, 128, chunkkeys, t)

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	wg.Wait()
	i = 0
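	// iterate over all proximity order bins (0..255) and collect the keys reported by SyncIterator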
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeysResults[n-1] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeysResults[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeysResults[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}
func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

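// benchmarkDbStorePut sets up a test LDBStore (optionally mock-backed) and runs the
// shared benchmarkStorePut helper against it.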
func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, processors, n, chunksize, b)
}

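// benchmarkDbStoreGet sets up a test LDBStore (optionally mock-backed) and runs the
// shared benchmarkStoreGet helper against it.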
func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, processors, n, chunksize, b)
}

func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store and
// retrieve them again, provided we do not hit the garbage collection capacity.
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		go ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Addr)
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.SData, chunks[i].SData) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Info("got back chunk", "chunk", ret)
	}

	if ldb.entryCnt != uint64(n+1) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n+1, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n+1) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n+1, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than the store's capacity, and
// that afterwards only some of them can be retrieved, because garbage collection must have cleared the excess.
func TestLDBStoreCollectGarbage(t *testing.T) {
	capacity := 500
	n := 2000

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// wait for garbage collection to kick in on the responsible actor
	time.Sleep(5 * time.Second)

	var missing int
	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Addr)
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.SData, chunks[i].SData) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	if missing < n-capacity {
		t.Fatalf("gc failure: expected at least %v chunks to be missing, but only %v are actually missing", n-capacity, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put a number of chunks and then delete some of them
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		go ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			key := chunks[i].Addr
			ikey := getIndexKey(key)

			var indx dpaDBIndex
			ldb.tryAccessIdx(ikey, &indx)

			ldb.delete(indx.Idx, ikey, ldb.po(key))
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Addr)

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil || ret != nil {
				t.Fatal("expected even chunk to be missing, but it was still returned")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.SData, chunks[i].SData) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}

// TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection
func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	capacity := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))

	n := 7

	chunks := []*Chunk{}
	for i := 0; i < capacity; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	// delete all chunks
	for i := 0; i < n; i++ {
		key := chunks[i].Addr
		ikey := getIndexKey(key)

		var indx dpaDBIndex
		ldb.tryAccessIdx(ikey, &indx)

		ldb.delete(indx.Idx, ikey, ldb.po(key))
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	cleanup()

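	// start over with a fresh store at the same capacity and fill it to capacity,
	// so that the chunk with the lowest access count gets garbage collected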
	ldb, cleanup = newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	n = 10

	for i := 0; i < n; i++ {
		ldb.Put(context.TODO(), chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	// expect the first chunk to be missing, because it has the smallest access value
	idx := 0
	ret, err := ldb.Get(context.TODO(), chunks[idx].Addr)
	if err == nil || ret != nil {
		t.Fatal("expected first chunk to be missing, but it was still returned")
	}

	// expect the last chunk to be present, as it has the largest access value
	idx = 9
	ret, err = ldb.Get(context.TODO(), chunks[idx].Addr)
	if err != nil {
		t.Fatalf("expected no error, but got %s", err)
	}

	if !bytes.Equal(ret.SData, chunks[idx].SData) {
		t.Fatal("expected to get the same data back, but got something else")
	}
}