github.com/core-coin/go-core/v2@v2.1.9/core/state/snapshot/difflayer_test.go (about)

     1  // Copyright 2019 by the Authors
     2  // This file is part of the go-core library.
     3  //
     4  // The go-core library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-core library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snapshot
    18  
    19  import (
    20  	"bytes"
    21  	"math/rand"
    22  	"testing"
    23  
    24  	"github.com/VictoriaMetrics/fastcache"
    25  
    26  	"github.com/core-coin/go-core/v2/xcbdb/memorydb"
    27  
    28  	"github.com/core-coin/go-core/v2/common"
    29  	"github.com/core-coin/go-core/v2/crypto"
    30  )
    31  
    32  func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
    33  	copy := make(map[common.Hash]struct{})
    34  	for hash := range destructs {
    35  		copy[hash] = struct{}{}
    36  	}
    37  	return copy
    38  }
    39  
    40  func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
    41  	copy := make(map[common.Hash][]byte)
    42  	for hash, blob := range accounts {
    43  		copy[hash] = blob
    44  	}
    45  	return copy
    46  }
    47  
    48  func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
    49  	copy := make(map[common.Hash]map[common.Hash][]byte)
    50  	for accHash, slots := range storage {
    51  		copy[accHash] = make(map[common.Hash][]byte)
    52  		for slotHash, blob := range slots {
    53  			copy[accHash][slotHash] = blob
    54  		}
    55  	}
    56  	return copy
    57  }
    58  
// TestMergeBasics tests some simple merges: it stacks five identical diff
// layers on top of a disk layer, flattens them, and checks that the merged
// layer's account/destruct/storage contents match the original data set.
func TestMergeBasics(t *testing.T) {
	var (
		destructs = make(map[common.Hash]struct{})
		accounts  = make(map[common.Hash][]byte)
		storage   = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent
	for i := 0; i < 100; i++ {
		h := randomHash()
		data := randomAccount()

		accounts[h] = data
		// ~1 in 4 accounts is also marked destructed
		if rand.Intn(4) == 0 {
			destructs[h] = struct{}{}
		}
		// ~1 in 2 accounts gets a single random storage slot
		if rand.Intn(2) == 0 {
			accStorage := make(map[common.Hash][]byte)
			value := make([]byte, 32)
			rand.Read(value)
			accStorage[randomHash()] = value
			storage[h] = accStorage
		}
	}
	// Add some (identical) layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	{ // Check account lists
		// NOTE: the internal accountList is built lazily — it must be empty
		// before AccountList() is first called, and populated afterwards, so
		// the order of these three checks is significant.
		if have, want := len(merged.accountList), 0; have != want {
			t.Errorf("accountList wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.AccountList()), len(accounts); have != want {
			t.Errorf("AccountList() wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.accountList), len(accounts); have != want {
			t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
		}
	}
	{ // Check account drops
		if have, want := len(merged.destructSet), len(destructs); have != want {
			t.Errorf("accountDrop wrong: have %v, want %v", have, want)
		}
	}
	{ // Check storage lists
		// Same lazy-build property as accountList: storageList grows by one
		// entry per StorageList(aHash) call, hence the running counter i.
		i := 0
		for aHash, sMap := range storage {
			if have, want := len(merged.storageList), i; have != want {
				t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
			}
			list, _ := merged.StorageList(aHash)
			if have, want := len(list), len(sMap); have != want {
				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
				t.Errorf("storageList wrong: have %v, want %v", have, want)
			}
			i++
		}
	}
}
   125  
   126  // TestMergeDelete tests some deletion
   127  func TestMergeDelete(t *testing.T) {
   128  	var (
   129  		storage = make(map[common.Hash]map[common.Hash][]byte)
   130  	)
   131  	// Fill up a parent
   132  	h1 := common.HexToHash("0x01")
   133  	h2 := common.HexToHash("0x02")
   134  
   135  	flipDrops := func() map[common.Hash]struct{} {
   136  		return map[common.Hash]struct{}{
   137  			h2: {},
   138  		}
   139  	}
   140  	flipAccs := func() map[common.Hash][]byte {
   141  		return map[common.Hash][]byte{
   142  			h1: randomAccount(),
   143  		}
   144  	}
   145  	flopDrops := func() map[common.Hash]struct{} {
   146  		return map[common.Hash]struct{}{
   147  			h1: {},
   148  		}
   149  	}
   150  	flopAccs := func() map[common.Hash][]byte {
   151  		return map[common.Hash][]byte{
   152  			h2: randomAccount(),
   153  		}
   154  	}
   155  	// Add some flipAccs-flopping layers on top
   156  	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
   157  	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
   158  	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
   159  	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
   160  	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
   161  	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
   162  	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
   163  
   164  	if data, _ := child.Account(h1); data == nil {
   165  		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
   166  	}
   167  	if data, _ := child.Account(h2); data != nil {
   168  		t.Errorf("last diff layer: expected %x account to be nil", h2)
   169  	}
   170  	if _, ok := child.destructSet[h1]; ok {
   171  		t.Errorf("last diff layer: expected %x drop to be missing", h1)
   172  	}
   173  	if _, ok := child.destructSet[h2]; !ok {
   174  		t.Errorf("last diff layer: expected %x drop to be present", h1)
   175  	}
   176  	// And flatten
   177  	merged := (child.flatten()).(*diffLayer)
   178  
   179  	if data, _ := merged.Account(h1); data == nil {
   180  		t.Errorf("merged layer: expected %x account to be non-nil", h1)
   181  	}
   182  	if data, _ := merged.Account(h2); data != nil {
   183  		t.Errorf("merged layer: expected %x account to be nil", h2)
   184  	}
   185  	if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
   186  		t.Errorf("merged diff layer: expected %x drop to be present", h1)
   187  	}
   188  	if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
   189  		t.Errorf("merged diff layer: expected %x drop to be present", h1)
   190  	}
   191  	// If we add more granular metering of memory, we can enable this again,
   192  	// but it's not implemented for now
   193  	//if have, want := merged.memory, child.memory; have != want {
   194  	//	t.Errorf("mem wrong: have %d, want %d", have, want)
   195  	//}
   196  }
   197  
   198  // This tests that if we create a new account, and set a slot, and then merge
   199  // it, the lists will be correct.
   200  func TestInsertAndMerge(t *testing.T) {
   201  	// Fill up a parent
   202  	var (
   203  		acc    = common.HexToHash("0x01")
   204  		slot   = common.HexToHash("0x02")
   205  		parent *diffLayer
   206  		child  *diffLayer
   207  	)
   208  	{
   209  		var (
   210  			destructs = make(map[common.Hash]struct{})
   211  			accounts  = make(map[common.Hash][]byte)
   212  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   213  		)
   214  		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
   215  	}
   216  	{
   217  		var (
   218  			destructs = make(map[common.Hash]struct{})
   219  			accounts  = make(map[common.Hash][]byte)
   220  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   221  		)
   222  		accounts[acc] = randomAccount()
   223  		storage[acc] = make(map[common.Hash][]byte)
   224  		storage[acc][slot] = []byte{0x01}
   225  		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   226  	}
   227  	// And flatten
   228  	merged := (child.flatten()).(*diffLayer)
   229  	{ // Check that slot value is present
   230  		have, _ := merged.Storage(acc, slot)
   231  		if want := []byte{0x01}; !bytes.Equal(have, want) {
   232  			t.Errorf("merged slot value wrong: have %x, want %x", have, want)
   233  		}
   234  	}
   235  }
   236  
   237  func emptyLayer() *diskLayer {
   238  	return &diskLayer{
   239  		diskdb: memorydb.New(),
   240  		cache:  fastcache.New(500 * 1024),
   241  	}
   242  }
   243  
   244  // BenchmarkSearch checks how long it takes to find a non-existing key
   245  // BenchmarkSearch-6   	  200000	     10481 ns/op (1K per layer)
   246  // BenchmarkSearch-6   	  200000	     10760 ns/op (10K per layer)
   247  // BenchmarkSearch-6   	  100000	     17866 ns/op
   248  //
   249  // BenchmarkSearch-6   	  500000	      3723 ns/op (10k per layer, only top-level RLock()
   250  func BenchmarkSearch(b *testing.B) {
   251  	// First, we set up 128 diff layers, with 1K items each
   252  	fill := func(parent snapshot) *diffLayer {
   253  		var (
   254  			destructs = make(map[common.Hash]struct{})
   255  			accounts  = make(map[common.Hash][]byte)
   256  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   257  		)
   258  		for i := 0; i < 10000; i++ {
   259  			accounts[randomHash()] = randomAccount()
   260  		}
   261  		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   262  	}
   263  	var layer snapshot
   264  	layer = emptyLayer()
   265  	for i := 0; i < 128; i++ {
   266  		layer = fill(layer)
   267  	}
   268  	key := crypto.SHA3Hash([]byte{0x13, 0x38})
   269  	b.ResetTimer()
   270  	for i := 0; i < b.N; i++ {
   271  		layer.AccountRLP(key)
   272  	}
   273  }
   274  
   275  // BenchmarkSearchSlot checks how long it takes to find a non-existing key
   276  // - Number of layers: 128
   277  // - Each layers contains the account, with a couple of storage slots
   278  // BenchmarkSearchSlot-6   	  100000	     14554 ns/op
   279  // BenchmarkSearchSlot-6   	  100000	     22254 ns/op (when checking parent root using mutex)
   280  // BenchmarkSearchSlot-6   	  100000	     14551 ns/op (when checking parent number using atomic)
   281  // With bloom filter:
   282  // BenchmarkSearchSlot-6   	 3467835	       351 ns/op
   283  func BenchmarkSearchSlot(b *testing.B) {
   284  	// First, we set up 128 diff layers, with 1K items each
   285  	accountKey := crypto.SHA3Hash([]byte{0x13, 0x37})
   286  	storageKey := crypto.SHA3Hash([]byte{0x13, 0x37})
   287  	accountRLP := randomAccount()
   288  	fill := func(parent snapshot) *diffLayer {
   289  		var (
   290  			destructs = make(map[common.Hash]struct{})
   291  			accounts  = make(map[common.Hash][]byte)
   292  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   293  		)
   294  		accounts[accountKey] = accountRLP
   295  
   296  		accStorage := make(map[common.Hash][]byte)
   297  		for i := 0; i < 5; i++ {
   298  			value := make([]byte, 32)
   299  			rand.Read(value)
   300  			accStorage[randomHash()] = value
   301  			storage[accountKey] = accStorage
   302  		}
   303  		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   304  	}
   305  	var layer snapshot
   306  	layer = emptyLayer()
   307  	for i := 0; i < 128; i++ {
   308  		layer = fill(layer)
   309  	}
   310  	b.ResetTimer()
   311  	for i := 0; i < b.N; i++ {
   312  		layer.Storage(accountKey, storageKey)
   313  	}
   314  }
   315  
   316  // With accountList and sorting
   317  // BenchmarkFlatten-6   	      50	  29890856 ns/op
   318  //
   319  // Without sorting and tracking accountlist
   320  // BenchmarkFlatten-6   	     300	   5511511 ns/op
   321  func BenchmarkFlatten(b *testing.B) {
   322  	fill := func(parent snapshot) *diffLayer {
   323  		var (
   324  			destructs = make(map[common.Hash]struct{})
   325  			accounts  = make(map[common.Hash][]byte)
   326  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   327  		)
   328  		for i := 0; i < 100; i++ {
   329  			accountKey := randomHash()
   330  			accounts[accountKey] = randomAccount()
   331  
   332  			accStorage := make(map[common.Hash][]byte)
   333  			for i := 0; i < 20; i++ {
   334  				value := make([]byte, 32)
   335  				rand.Read(value)
   336  				accStorage[randomHash()] = value
   337  
   338  			}
   339  			storage[accountKey] = accStorage
   340  		}
   341  		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   342  	}
   343  	b.ResetTimer()
   344  	for i := 0; i < b.N; i++ {
   345  		b.StopTimer()
   346  		var layer snapshot
   347  		layer = emptyLayer()
   348  		for i := 1; i < 128; i++ {
   349  			layer = fill(layer)
   350  		}
   351  		b.StartTimer()
   352  
   353  		for i := 1; i < 128; i++ {
   354  			dl, ok := layer.(*diffLayer)
   355  			if !ok {
   356  				break
   357  			}
   358  			layer = dl.flatten()
   359  		}
   360  		b.StopTimer()
   361  	}
   362  }
   363  
   364  // This test writes ~324M of diff layers to disk, spread over
   365  // - 128 individual layers,
   366  // - each with 200 accounts
   367  // - containing 200 slots
   368  //
   369  // BenchmarkJournal-6   	       1	1471373923 ns/ops
   370  // BenchmarkJournal-6   	       1	1208083335 ns/op // bufio writer
   371  func BenchmarkJournal(b *testing.B) {
   372  	fill := func(parent snapshot) *diffLayer {
   373  		var (
   374  			destructs = make(map[common.Hash]struct{})
   375  			accounts  = make(map[common.Hash][]byte)
   376  			storage   = make(map[common.Hash]map[common.Hash][]byte)
   377  		)
   378  		for i := 0; i < 200; i++ {
   379  			accountKey := randomHash()
   380  			accounts[accountKey] = randomAccount()
   381  
   382  			accStorage := make(map[common.Hash][]byte)
   383  			for i := 0; i < 200; i++ {
   384  				value := make([]byte, 32)
   385  				rand.Read(value)
   386  				accStorage[randomHash()] = value
   387  
   388  			}
   389  			storage[accountKey] = accStorage
   390  		}
   391  		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
   392  	}
   393  	layer := snapshot(new(diskLayer))
   394  	for i := 1; i < 128; i++ {
   395  		layer = fill(layer)
   396  	}
   397  	b.ResetTimer()
   398  
   399  	for i := 0; i < b.N; i++ {
   400  		layer.Journal(new(bytes.Buffer))
   401  	}
   402  }