github.com/xpaymentsorg/go-xpayments@v1.9.7/core/state/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"bytes"
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address common.Address
	balance *big.Int
	nonce   uint64
	code    []byte
}

// makeTestState creates a sample test state to test node-wise reconstruction.
func makeTestState() (Database, common.Hash, []*testAccount) {
	// Create an empty state
	db := NewDatabase(rawdb.NewMemoryDatabase())
	state, _ := New(common.Hash{}, db)

	// Fill it with some arbitrary data
	accounts := []*testAccount{}
	for i := byte(0); i < 96; i++ {
		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		acc := &testAccount{address: common.BytesToAddress([]byte{i})}

		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		if i%3 == 0 {
			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
		}
		state.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := state.Commit(false)

	// Return the generated state
	return db, root, accounts
}
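
// Note: the fixture produces 96 accounts at single-byte addresses, every third
// of which carries a small code blob, so both account trie nodes and contract
// code can be served to the sync tests below through srcDb.TrieDB().Node.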
// checkStateAccounts cross references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
	// Check root availability and state contents
	state, err := New(root, NewDatabase(db))
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	if err := checkStateConsistency(db, root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}
	}
}

// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
func checkTrieConsistency(db ethdb.Database, root common.Hash) error {
	if v, _ := db.Get(root[:]); v == nil {
		return nil // Consider a non-existent state consistent.
	}
	trie, err := trie.New(root, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// checkStateConsistency checks that all data of a state root is present.
func checkStateConsistency(db ethdb.Database, root common.Hash) error {
	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.Get(root.Bytes()); err != nil {
		return nil // Consider a non-existent state consistent.
	}
	state, err := New(root, NewDatabase(db))
	if err != nil {
		return err
	}
	it := NewNodeIterator(state)
	for it.Next() {
	}
	return it.Error
}

// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
	// Root hash of an empty trie: keccak256 of the RLP encoding of an empty string
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	if req := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New())).Missing(1); len(req) != 0 {
		t.Errorf("content requested for empty state: %v", req)
	}
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }

func testIterativeStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := append([]common.Hash{}, sched.Missing(count)...)
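	// Drain the scheduler: resolve each requested hash against the source trie
	// database, feed the blobs back through Process, and persist the completed
	// nodes into the destination database via Commit, until Missing reports
	// nothing left to fetch.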
	for len(queue) > 0 {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = append(queue[:0], sched.Missing(count)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := append([]common.Hash{}, sched.Missing(0)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]trie.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = append(queue[len(results):], sched.Missing(0)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, though in a
// random order.
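// The shuffling comes from keeping the request queue in a map, whose iteration
// order Go deliberately randomizes.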
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(count) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]trie.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(count) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and in random order, with the others sent only
// later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(0) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, in a random order
		results := make([]trie.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, hash := range sched.Missing(0) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
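// It also sanity checks that deleting any synced node afterwards is caught by
// the consistency checker.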
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Verify the source state is itself consistent before syncing from it
	if err := checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot); err != nil {
		t.Fatalf("inconsistent source state: %v", err)
	}
	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	added := []common.Hash{}
	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete or missing entirely.
	checkSubtries:
		for _, hash := range added {
			for _, acc := range srcAccounts {
				if hash == crypto.Keccak256Hash(acc.code) {
					continue checkSubtries // skip trie check of code nodes.
				}
			}
			// Can't use checkStateConsistency here because subtrie keys may have odd
			// length and crash in LeafKey.
			if err := checkTrieConsistency(dstDb, hash); err != nil {
				t.Fatalf("state inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		queue = append(queue[:0], sched.Missing(1)...)
	}
	// Sanity check that removing any node from the database is detected. Skip
	// the first entry (the state root): checkStateConsistency treats a missing
	// root as a non-existent, hence trivially consistent, state.
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := dstDb.Get(key)

		dstDb.Delete(key)
		if err := checkStateConsistency(dstDb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		dstDb.Put(key, value)
	}
}
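
// fetchNodes is an illustrative sketch, not wired into the tests above: a
// hypothetical helper factoring out the retrieval step every test repeats,
// resolving a batch of requested hashes against the source trie database and
// wrapping the blobs as trie.SyncResult values ready for sched.Process.
func fetchNodes(t *testing.T, srcDb Database, hashes []common.Hash) []trie.SyncResult {
	results := make([]trie.SyncResult, len(hashes))
	for i, hash := range hashes {
		// Hashes handed out by Missing refer either to trie nodes or to
		// contract code blobs; in this version both are served by Node.
		data, err := srcDb.TrieDB().Node(hash)
		if err != nil {
			t.Fatalf("failed to retrieve node data for %x", hash)
		}
		results[i] = trie.SyncResult{Hash: hash, Data: data}
	}
	return results
}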