github.com/luckypickle/go-ethereum-vet@v1.14.2/core/state/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"bytes"
	"math/big"
	"testing"

	"github.com/luckypickle/go-ethereum-vet/common"
	"github.com/luckypickle/go-ethereum-vet/crypto"
	"github.com/luckypickle/go-ethereum-vet/ethdb"
	"github.com/luckypickle/go-ethereum-vet/trie"
)

// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address common.Address
	balance *big.Int
	nonce   uint64
	code    []byte
}

// makeTestState creates a sample test state to test node-wise reconstruction.
func makeTestState() (Database, common.Hash, []*testAccount) {
	// Create an empty state
	db := NewDatabase(ethdb.NewMemDatabase())
	state, _ := New(common.Hash{}, db)

	// Fill it with some arbitrary data
	accounts := []*testAccount{}
	for i := byte(0); i < 96; i++ {
		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		acc := &testAccount{address: common.BytesToAddress([]byte{i})}

		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		if i%3 == 0 {
			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
		}
		state.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := state.Commit(false)

	// Return the generated state
	return db, root, accounts
}

// checkStateAccounts cross-references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
	// Check root availability and state contents
	state, err := New(root, NewDatabase(db))
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	if err := checkStateConsistency(db, root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}
	}
}
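// Note: the two consistency helpers below overlap but are not interchangeable.
// checkTrieConsistency walks raw trie nodes and therefore works for any
// (sub-)trie root, while checkStateConsistency iterates a full state and, as
// the comment in TestIncompleteStateSync notes, would crash in LeafKey on
// odd-length subtrie keys. The tests pick whichever matches the root they hold.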
// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
func checkTrieConsistency(db ethdb.Database, root common.Hash) error {
	if v, _ := db.Get(root[:]); v == nil {
		return nil // Consider a non-existent state consistent.
	}
	trie, err := trie.New(root, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// checkStateConsistency checks that all data of a state root is present.
func checkStateConsistency(db ethdb.Database, root common.Hash) error {
	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.Get(root.Bytes()); err != nil {
		return nil // Consider a non-existent state consistent.
	}
	state, err := New(root, NewDatabase(db))
	if err != nil {
		return err
	}
	it := NewNodeIterator(state)
	for it.Next() {
	}
	return it.Error
}

// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	if req := NewStateSync(empty, ethdb.NewMemDatabase()).Missing(1); len(req) != 0 {
		t.Errorf("content requested for empty state: %v", req)
	}
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }

func testIterativeStateSync(t *testing.T, batch int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := append([]common.Hash{}, sched.Missing(batch)...)
	for len(queue) > 0 {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(batch)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
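// fetchNodes is a hypothetical helper, added here only as an illustration of
// the retrieval step that every sync test in this file inlines: resolve each
// hash the scheduler asked for against the source trie database and wrap the
// node data in a trie.SyncResult. The tests keep this loop inline so each can
// vary the ordering and batching; this sketch is not called by any of them.
func fetchNodes(t *testing.T, srcDb Database, hashes []common.Hash) []trie.SyncResult {
	results := make([]trie.SyncResult, len(hashes))
	for i, hash := range hashes {
		data, err := srcDb.TrieDB().Node(hash)
		if err != nil {
			t.Fatalf("failed to retrieve node data for %x", hash)
		}
		results[i] = trie.SyncResult{Hash: hash, Data: data}
	}
	return results
}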
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, with the remainder delivered only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := append([]common.Hash{}, sched.Missing(0)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]trie.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[len(results):], sched.Missing(0)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, though in a
// random order.
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, batch int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(batch) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]trie.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(batch) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
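// The "random order" in the tests above and below is not explicitly seeded: it
// comes from Go's map iteration, whose order is unspecified and randomized by
// the runtime, so ranging over the queue map delivers the scheduled hashes in
// a different order on every run.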
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even in a random order), with the remainder
// delivered only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(0) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, picked in a random order
		results := make([]trie.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, hash := range sched.Missing(0) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
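// The invariant checked below rests on how the sync scheduler commits data: a
// fetched node is held back in memory until all of its children have been
// persisted, so nodes reach the database bottom-up and every written node
// roots a complete sub-trie. (This describes the upstream trie scheduler's
// behavior; the test itself only asserts the observable effect.)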
// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot)

	// Create a destination state and sync with the scheduler
	dstDb := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	added := []common.Hash{}
	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete or missing entirely.
	checkSubtries:
		for _, hash := range added {
			for _, acc := range srcAccounts {
				if hash == crypto.Keccak256Hash(acc.code) {
					continue checkSubtries // skip trie check of code nodes.
				}
			}
			// Can't use checkStateConsistency here because subtrie keys may have odd
			// length and crash in LeafKey.
			if err := checkTrieConsistency(dstDb, hash); err != nil {
				t.Fatalf("state inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		queue = append(queue[:0], sched.Missing(1)...)
	}
	// Sanity check that removing any node from the database is detected
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := dstDb.Get(key)

		dstDb.Delete(key)
		if err := checkStateConsistency(dstDb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		dstDb.Put(key, value)
	}
}
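// TestEmptyStateRootConstant is an added sanity check, not part of the original
// suite, documenting where the magic hash in TestEmptyStateSync comes from: the
// root of an empty merkle trie is the Keccak256 hash of the RLP encoding of an
// empty string, i.e. of the single byte 0x80.
func TestEmptyStateRootConstant(t *testing.T) {
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	if computed := crypto.Keccak256Hash([]byte{0x80}); computed != empty {
		t.Errorf("empty trie root mismatch: have %x, want %x", computed, empty)
	}
}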