gitlab.com/yannislg/go-pulse@v0.0.0-20210722055913-a3e24e95638d/core/state/snapshot/difflayer_test.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snapshot
    18  
    19  import (
    20  	"bytes"
    21  	"math/rand"
    22  	"testing"
    23  
    24  	"github.com/VictoriaMetrics/fastcache"
    25  	"github.com/ethereum/go-ethereum/common"
    26  	"github.com/ethereum/go-ethereum/crypto"
    27  	"github.com/ethereum/go-ethereum/ethdb/memorydb"
    28  )
    29  
    30  func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
    31  	copy := make(map[common.Hash]struct{})
    32  	for hash := range destructs {
    33  		copy[hash] = struct{}{}
    34  	}
    35  	return copy
    36  }
    37  
    38  func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
    39  	copy := make(map[common.Hash][]byte)
    40  	for hash, blob := range accounts {
    41  		copy[hash] = blob
    42  	}
    43  	return copy
    44  }
    45  
    46  func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
    47  	copy := make(map[common.Hash]map[common.Hash][]byte)
    48  	for accHash, slots := range storage {
    49  		copy[accHash] = make(map[common.Hash][]byte)
    50  		for slotHash, blob := range slots {
    51  			copy[accHash][slotHash] = blob
    52  		}
    53  	}
    54  	return copy
    55  }
    56  
// TestMergeBasics tests some simple merges: it stacks five identical diff
// layers on an empty disk layer, flattens them and checks that the merged
// layer reports the expected accounts, destructs and storage slots.
func TestMergeBasics(t *testing.T) {
	var (
		destructs = make(map[common.Hash]struct{})
		accounts  = make(map[common.Hash][]byte)
		storage   = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent: 100 random accounts, of which ~1/4 are also marked
	// destructed and ~1/2 get a single random storage slot.
	for i := 0; i < 100; i++ {
		h := randomHash()
		data := randomAccount()

		accounts[h] = data
		if rand.Intn(4) == 0 {
			destructs[h] = struct{}{}
		}
		if rand.Intn(2) == 0 {
			accStorage := make(map[common.Hash][]byte)
			value := make([]byte, 32)
			rand.Read(value)
			accStorage[randomHash()] = value
			storage[h] = accStorage
		}
	}
	// Add some (identical) layers on top; each layer gets its own copy of
	// the maps so the layers don't alias each other's state.
	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	{ // Check account lists
		// The sorted account list is built lazily: it must still be empty
		// before AccountList() is first invoked, and fully populated after.
		if have, want := len(merged.accountList), 0; have != want {
			t.Errorf("accountList wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.AccountList()), len(accounts); have != want {
			t.Errorf("AccountList() wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.accountList), len(accounts); have != want {
			t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
		}
	}
	{ // Check account drops
		if have, want := len(merged.destructSet), len(destructs); have != want {
			t.Errorf("accountDrop wrong: have %v, want %v", have, want)
		}
	}
	{ // Check storage lists
		// Storage lists are likewise built lazily per account: the cached
		// list count grows by one with each StorageList() call, hence the
		// comparison against the running counter i before each call.
		i := 0
		for aHash, sMap := range storage {
			if have, want := len(merged.storageList), i; have != want {
				t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.StorageList(aHash)), len(sMap); have != want {
				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
				t.Errorf("storageList wrong: have %v, want %v", have, want)
			}
			i++
		}
	}
}
   122  
   123  // TestMergeDelete tests some deletion
   124  func TestMergeDelete(t *testing.T) {
   125  	var (
   126  		storage = make(map[common.Hash]map[common.Hash][]byte)
   127  	)
   128  	// Fill up a parent
   129  	h1 := common.HexToHash("0x01")
   130  	h2 := common.HexToHash("0x02")
   131  
   132  	flipDrops := func() map[common.Hash]struct{} {
   133  		return map[common.Hash]struct{}{
   134  			h2: struct{}{},
   135  		}
   136  	}
   137  	flipAccs := func() map[common.Hash][]byte {
   138  		return map[common.Hash][]byte{
   139  			h1: randomAccount(),
   140  		}
   141  	}
   142  	flopDrops := func() map[common.Hash]struct{} {
   143  		return map[common.Hash]struct{}{
   144  			h1: struct{}{},
   145  		}
   146  	}
   147  	flopAccs := func() map[common.Hash][]byte {
   148  		return map[common.Hash][]byte{
   149  			h2: randomAccount(),
   150  		}
   151  	}
   152  	// Add some flipAccs-flopping layers on top
   153  	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
   154  	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
   155  	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
   156  	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
   157  	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
   158  	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
   159  	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
   160  
   161  	if data, _ := child.Account(h1); data == nil {
   162  		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
   163  	}
   164  	if data, _ := child.Account(h2); data != nil {
   165  		t.Errorf("last diff layer: expected %x account to be nil", h2)
   166  	}
   167  	if _, ok := child.destructSet[h1]; ok {
   168  		t.Errorf("last diff layer: expected %x drop to be missing", h1)
   169  	}
   170  	if _, ok := child.destructSet[h2]; !ok {
   171  		t.Errorf("last diff layer: expected %x drop to be present", h1)
   172  	}
   173  	// And flatten
   174  	merged := (child.flatten()).(*diffLayer)
   175  
   176  	if data, _ := merged.Account(h1); data == nil {
   177  		t.Errorf("merged layer: expected %x account to be non-nil", h1)
   178  	}
   179  	if data, _ := merged.Account(h2); data != nil {
   180  		t.Errorf("merged layer: expected %x account to be nil", h2)
   181  	}
   182  	if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
   183  		t.Errorf("merged diff layer: expected %x drop to be present", h1)
   184  	}
   185  	if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
   186  		t.Errorf("merged diff layer: expected %x drop to be present", h1)
   187  	}
   188  	// If we add more granular metering of memory, we can enable this again,
   189  	// but it's not implemented for now
   190  	//if have, want := merged.memory, child.memory; have != want {
   191  	//	t.Errorf("mem wrong: have %d, want %d", have, want)
   192  	//}
   193  }
   194  
   195  // This tests that if we create a new account, and set a slot, and then merge
   196  // it, the lists will be correct.
   197  func TestInsertAndMerge(t *testing.T) {
   198  	// Fill up a parent
   199  	var (
   200  		acc    = common.HexToHash("0x01")
   201  		slot   = common.HexToHash("0x02")
   202  		parent *diffLayer
   203  		child  *diffLayer
   204  	)
   205  	{
   206  		var (
   207  			destructs = make(map[common.Hash]struct{})
   208  			accounts  = make(map[common.Hash][]byte)
   209  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   210  		)
   211  		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
   212  	}
   213  	{
   214  		var (
   215  			destructs = make(map[common.Hash]struct{})
   216  			accounts  = make(map[common.Hash][]byte)
   217  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   218  		)
   219  		accounts[acc] = randomAccount()
   220  		storage[acc] = make(map[common.Hash][]byte)
   221  		storage[acc][slot] = []byte{0x01}
   222  		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   223  	}
   224  	// And flatten
   225  	merged := (child.flatten()).(*diffLayer)
   226  	{ // Check that slot value is present
   227  		have, _ := merged.Storage(acc, slot)
   228  		if want := []byte{0x01}; !bytes.Equal(have, want) {
   229  			t.Errorf("merged slot value wrong: have %x, want %x", have, want)
   230  		}
   231  	}
   232  }
   233  
   234  func emptyLayer() *diskLayer {
   235  	return &diskLayer{
   236  		diskdb: memorydb.New(),
   237  		cache:  fastcache.New(500 * 1024),
   238  	}
   239  }
   240  
   241  // BenchmarkSearch checks how long it takes to find a non-existing key
   242  // BenchmarkSearch-6   	  200000	     10481 ns/op (1K per layer)
   243  // BenchmarkSearch-6   	  200000	     10760 ns/op (10K per layer)
   244  // BenchmarkSearch-6   	  100000	     17866 ns/op
   245  //
   246  // BenchmarkSearch-6   	  500000	      3723 ns/op (10k per layer, only top-level RLock()
   247  func BenchmarkSearch(b *testing.B) {
   248  	// First, we set up 128 diff layers, with 1K items each
   249  	fill := func(parent snapshot) *diffLayer {
   250  		var (
   251  			destructs = make(map[common.Hash]struct{})
   252  			accounts  = make(map[common.Hash][]byte)
   253  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   254  		)
   255  		for i := 0; i < 10000; i++ {
   256  			accounts[randomHash()] = randomAccount()
   257  		}
   258  		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   259  	}
   260  	var layer snapshot
   261  	layer = emptyLayer()
   262  	for i := 0; i < 128; i++ {
   263  		layer = fill(layer)
   264  	}
   265  	key := crypto.Keccak256Hash([]byte{0x13, 0x38})
   266  	b.ResetTimer()
   267  	for i := 0; i < b.N; i++ {
   268  		layer.AccountRLP(key)
   269  	}
   270  }
   271  
   272  // BenchmarkSearchSlot checks how long it takes to find a non-existing key
   273  // - Number of layers: 128
   274  // - Each layers contains the account, with a couple of storage slots
   275  // BenchmarkSearchSlot-6   	  100000	     14554 ns/op
   276  // BenchmarkSearchSlot-6   	  100000	     22254 ns/op (when checking parent root using mutex)
   277  // BenchmarkSearchSlot-6   	  100000	     14551 ns/op (when checking parent number using atomic)
   278  // With bloom filter:
   279  // BenchmarkSearchSlot-6   	 3467835	       351 ns/op
   280  func BenchmarkSearchSlot(b *testing.B) {
   281  	// First, we set up 128 diff layers, with 1K items each
   282  	accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
   283  	storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
   284  	accountRLP := randomAccount()
   285  	fill := func(parent snapshot) *diffLayer {
   286  		var (
   287  			destructs = make(map[common.Hash]struct{})
   288  			accounts  = make(map[common.Hash][]byte)
   289  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   290  		)
   291  		accounts[accountKey] = accountRLP
   292  
   293  		accStorage := make(map[common.Hash][]byte)
   294  		for i := 0; i < 5; i++ {
   295  			value := make([]byte, 32)
   296  			rand.Read(value)
   297  			accStorage[randomHash()] = value
   298  			storage[accountKey] = accStorage
   299  		}
   300  		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   301  	}
   302  	var layer snapshot
   303  	layer = emptyLayer()
   304  	for i := 0; i < 128; i++ {
   305  		layer = fill(layer)
   306  	}
   307  	b.ResetTimer()
   308  	for i := 0; i < b.N; i++ {
   309  		layer.Storage(accountKey, storageKey)
   310  	}
   311  }
   312  
   313  // With accountList and sorting
   314  // BenchmarkFlatten-6   	      50	  29890856 ns/op
   315  //
   316  // Without sorting and tracking accountlist
   317  // BenchmarkFlatten-6   	     300	   5511511 ns/op
   318  func BenchmarkFlatten(b *testing.B) {
   319  	fill := func(parent snapshot) *diffLayer {
   320  		var (
   321  			destructs = make(map[common.Hash]struct{})
   322  			accounts  = make(map[common.Hash][]byte)
   323  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   324  		)
   325  		for i := 0; i < 100; i++ {
   326  			accountKey := randomHash()
   327  			accounts[accountKey] = randomAccount()
   328  
   329  			accStorage := make(map[common.Hash][]byte)
   330  			for i := 0; i < 20; i++ {
   331  				value := make([]byte, 32)
   332  				rand.Read(value)
   333  				accStorage[randomHash()] = value
   334  
   335  			}
   336  			storage[accountKey] = accStorage
   337  		}
   338  		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   339  	}
   340  	b.ResetTimer()
   341  	for i := 0; i < b.N; i++ {
   342  		b.StopTimer()
   343  		var layer snapshot
   344  		layer = emptyLayer()
   345  		for i := 1; i < 128; i++ {
   346  			layer = fill(layer)
   347  		}
   348  		b.StartTimer()
   349  
   350  		for i := 1; i < 128; i++ {
   351  			dl, ok := layer.(*diffLayer)
   352  			if !ok {
   353  				break
   354  			}
   355  			layer = dl.flatten()
   356  		}
   357  		b.StopTimer()
   358  	}
   359  }
   360  
   361  // This test writes ~324M of diff layers to disk, spread over
   362  // - 128 individual layers,
   363  // - each with 200 accounts
   364  // - containing 200 slots
   365  //
   366  // BenchmarkJournal-6   	       1	1471373923 ns/ops
   367  // BenchmarkJournal-6   	       1	1208083335 ns/op // bufio writer
   368  func BenchmarkJournal(b *testing.B) {
   369  	fill := func(parent snapshot) *diffLayer {
   370  		var (
   371  			destructs = make(map[common.Hash]struct{})
   372  			accounts  = make(map[common.Hash][]byte)
   373  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   374  		)
   375  		for i := 0; i < 200; i++ {
   376  			accountKey := randomHash()
   377  			accounts[accountKey] = randomAccount()
   378  
   379  			accStorage := make(map[common.Hash][]byte)
   380  			for i := 0; i < 200; i++ {
   381  				value := make([]byte, 32)
   382  				rand.Read(value)
   383  				accStorage[randomHash()] = value
   384  
   385  			}
   386  			storage[accountKey] = accStorage
   387  		}
   388  		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   389  	}
   390  	layer := snapshot(new(diskLayer))
   391  	for i := 1; i < 128; i++ {
   392  		layer = fill(layer)
   393  	}
   394  	b.ResetTimer()
   395  
   396  	for i := 0; i < b.N; i++ {
   397  		layer.Journal(new(bytes.Buffer))
   398  	}
   399  }