github.com/MetalBlockchain/subnet-evm@v0.4.9/core/state/snapshot/difflayer_test.go

// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"math/rand"
	"testing"

	"github.com/MetalBlockchain/subnet-evm/ethdb/memorydb"
	"github.com/MetalBlockchain/subnet-evm/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
	copy := make(map[common.Hash]struct{})
	for hash := range destructs {
		copy[hash] = struct{}{}
	}
	return copy
}

func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
	copy := make(map[common.Hash][]byte)
	for hash, blob := range accounts {
		copy[hash] = blob
	}
	return copy
}

func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
	copy := make(map[common.Hash]map[common.Hash][]byte)
	for accHash, slots := range storage {
		copy[accHash] = make(map[common.Hash][]byte)
		for slotHash, blob := range slots {
			copy[accHash][slotHash] = blob
		}
	}
	return copy
}

// TestMergeBasics tests some simple merges
func TestMergeBasics(t *testing.T) {
	var (
		destructs = make(map[common.Hash]struct{})
		accounts  = make(map[common.Hash][]byte)
		storage   = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent
	for i := 0; i < 100; i++ {
		h := randomHash()
		data := randomAccount()

		accounts[h] = data
		if rand.Intn(4) == 0 {
			destructs[h] = struct{}{}
		}
		if rand.Intn(2) == 0 {
			accStorage := make(map[common.Hash][]byte)
			value := make([]byte, 32)
			rand.Read(value)
			accStorage[randomHash()] = value
			storage[h] = accStorage
		}
	}
	// Add some (identical) layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child := newDiffLayer(parent, common.Hash{}, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	{ // Check account lists
		if have, want := len(merged.accountList), 0; have != want {
			t.Errorf("accountList wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.AccountList()), len(accounts); have != want {
			t.Errorf("AccountList() wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.accountList), len(accounts); have != want {
			t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
		}
	}
	{ // Check account drops
		if have, want := len(merged.destructSet), len(destructs); have != want {
			t.Errorf("accountDrop wrong: have %v, want %v", have, want)
		}
	}
	{ // Check storage lists
		i := 0
		for aHash, sMap := range storage {
			if have, want := len(merged.storageList), i; have != want {
				t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
			}
			list, _ := merged.StorageList(aHash)
			if have, want := len(list), len(sMap); have != want {
				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
				t.Errorf("storageList wrong: have %v, want %v", have, want)
			}
			i++
		}
	}
}

// TestMergeDelete tests some deletion
func TestMergeDelete(t *testing.T) {
	var (
		storage = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent
	h1 := common.HexToHash("0x01")
	h2 := common.HexToHash("0x02")

	flipDrops := func() map[common.Hash]struct{} {
		return map[common.Hash]struct{}{
			h2: {},
		}
	}
	flipAccs := func() map[common.Hash][]byte {
		return map[common.Hash][]byte{
			h1: randomAccount(),
		}
	}
	flopDrops := func() map[common.Hash]struct{} {
		return map[common.Hash]struct{}{
			h1: {},
		}
	}
	flopAccs := func() map[common.Hash][]byte {
		return map[common.Hash][]byte{
			h2: randomAccount(),
		}
	}
	// Add some flipAccs-flopping layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, common.Hash{}, flipDrops(), flipAccs(), storage)
	child := parent.Update(common.Hash{}, common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, common.Hash{}, flipDrops(), flipAccs(), storage)
	child = child.Update(common.Hash{}, common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, common.Hash{}, flipDrops(), flipAccs(), storage)
	child = child.Update(common.Hash{}, common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, common.Hash{}, flipDrops(), flipAccs(), storage)

	if data, _ := child.Account(h1); data == nil {
		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
	}
	if data, _ := child.Account(h2); data != nil {
		t.Errorf("last diff layer: expected %x account to be nil", h2)
	}
	if _, ok := child.destructSet[h1]; ok {
		t.Errorf("last diff layer: expected %x drop to be missing", h1)
	}
	if _, ok := child.destructSet[h2]; !ok {
		t.Errorf("last diff layer: expected %x drop to be present", h2)
	}
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	if data, _ := merged.Account(h1); data == nil {
		t.Errorf("merged layer: expected %x account to be non-nil", h1)
	}
	if data, _ := merged.Account(h2); data != nil {
		t.Errorf("merged layer: expected %x account to be nil", h2)
	}
	if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
		t.Errorf("merged diff layer: expected %x drop to be present", h1)
	}
	if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
		t.Errorf("merged diff layer: expected %x drop to be present", h2)
	}
	// If we add more granular metering of memory, we can enable this again,
	// but it's not implemented for now
	//if have, want := merged.memory, child.memory; have != want {
	//	t.Errorf("mem wrong: have %d, want %d", have, want)
	//}
}

// TestInsertAndMerge tests that if we create a new account, set a slot, and
// then merge it, the lists will be correct.
func TestInsertAndMerge(t *testing.T) {
	// Fill up a parent
	var (
		acc    = common.HexToHash("0x01")
		slot   = common.HexToHash("0x02")
		parent *diffLayer
		child  *diffLayer
	)
	{
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		parent = newDiffLayer(emptyLayer(), common.Hash{}, common.Hash{}, destructs, accounts, storage)
	}
	{
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		accounts[acc] = randomAccount()
		storage[acc] = make(map[common.Hash][]byte)
		storage[acc][slot] = []byte{0x01}
		child = newDiffLayer(parent, common.Hash{}, common.Hash{}, destructs, accounts, storage)
	}
	// And flatten
	merged := (child.flatten()).(*diffLayer)
	{ // Check that slot value is present
		have, _ := merged.Storage(acc, slot)
		if want := []byte{0x01}; !bytes.Equal(have, want) {
			t.Errorf("merged slot value wrong: have %x, want %x", have, want)
		}
	}
}

func emptyLayer() *diskLayer {
	return &diskLayer{
		diskdb: memorydb.New(),
		cache:  utils.NewMeteredCache(500*1024, "", "", 0),
	}
}

// BenchmarkSearch checks how long it takes to find a non-existing key
// BenchmarkSearch-6   200000   10481 ns/op (1K per layer)
// BenchmarkSearch-6   200000   10760 ns/op (10K per layer)
// BenchmarkSearch-6   100000   17866 ns/op
//
// BenchmarkSearch-6   500000    3723 ns/op (10K per layer, only top-level RLock())
func BenchmarkSearch(b *testing.B) {
	// First, we set up 128 diff layers, with 10K items each
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 10000; i++ {
			accounts[randomHash()] = randomAccount()
		}
		return newDiffLayer(parent, common.Hash{}, common.Hash{}, destructs, accounts, storage)
	}
	var layer snapshot
	layer = emptyLayer()
	for i := 0; i < 128; i++ {
		layer = fill(layer)
	}
	key := crypto.Keccak256Hash([]byte{0x13, 0x38})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		layer.AccountRLP(key)
	}
}

// BenchmarkSearchSlot checks how long it takes to find a non-existing key
// - Number of layers: 128
// - Each layer contains the account, with a couple of storage slots
// BenchmarkSearchSlot-6    100000   14554 ns/op
// BenchmarkSearchSlot-6    100000   22254 ns/op (when checking parent root using mutex)
// BenchmarkSearchSlot-6    100000   14551 ns/op (when checking parent number using atomic)
// With bloom filter:
// BenchmarkSearchSlot-6   3467835     351 ns/op
func BenchmarkSearchSlot(b *testing.B) {
	// First, we set up 128 diff layers, each containing the target account
	// with a handful of storage slots
	accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
	storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
	accountRLP := randomAccount()
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		accounts[accountKey] = accountRLP

		accStorage := make(map[common.Hash][]byte)
		for i := 0; i < 5; i++ {
			value := make([]byte, 32)
			rand.Read(value)
			accStorage[randomHash()] = value
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, common.Hash{}, destructs, accounts, storage)
	}
	var layer snapshot
	layer = emptyLayer()
	for i := 0; i < 128; i++ {
		layer = fill(layer)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		layer.Storage(accountKey, storageKey)
	}
}

// With accountList and sorting
// BenchmarkFlatten-6    50   29890856 ns/op
//
// Without sorting and tracking accountList
// BenchmarkFlatten-6   300    5511511 ns/op
func BenchmarkFlatten(b *testing.B) {
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 100; i++ {
			accountKey := randomHash()
			accounts[accountKey] = randomAccount()

			accStorage := make(map[common.Hash][]byte)
			for i := 0; i < 20; i++ {
				value := make([]byte, 32)
				rand.Read(value)
				accStorage[randomHash()] = value
			}
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, common.Hash{}, destructs, accounts, storage)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		var layer snapshot
		layer = emptyLayer()
		for i := 1; i < 128; i++ {
			layer = fill(layer)
		}
		b.StartTimer()

		for i := 1; i < 128; i++ {
			dl, ok := layer.(*diffLayer)
			if !ok {
				break
			}
			layer = dl.flatten()
		}
		b.StopTimer()
	}
}