// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

package state

import (
	"bytes"
	"math/big"
	"testing"

	"github.com/Sberex/go-sberex/common"
	"github.com/Sberex/go-sberex/crypto"
	"github.com/Sberex/go-sberex/ethdb"
	"github.com/Sberex/go-sberex/trie"
)

// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address common.Address // account address (derived from a single byte in makeTestState)
	balance *big.Int       // expected balance to cross-check after sync
	nonce   uint64         // expected nonce to cross-check after sync
	code    []byte         // expected contract code; nil for plain accounts
}

// makeTestState creates a sample test state to test node-wise reconstruction.
34 func makeTestState() (Database, common.Hash, []*testAccount) { 35 // Create an empty state 36 diskdb, _ := ethdb.NewMemDatabase() 37 db := NewDatabase(diskdb) 38 state, _ := New(common.Hash{}, db) 39 40 // Fill it with some arbitrary data 41 accounts := []*testAccount{} 42 for i := byte(0); i < 96; i++ { 43 obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i})) 44 acc := &testAccount{address: common.BytesToAddress([]byte{i})} 45 46 obj.AddBalance(big.NewInt(int64(11 * i))) 47 acc.balance = big.NewInt(int64(11 * i)) 48 49 obj.SetNonce(uint64(42 * i)) 50 acc.nonce = uint64(42 * i) 51 52 if i%3 == 0 { 53 obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i}) 54 acc.code = []byte{i, i, i, i, i} 55 } 56 state.updateStateObject(obj) 57 accounts = append(accounts, acc) 58 } 59 root, _ := state.Commit(false) 60 61 // Return the generated state 62 return db, root, accounts 63 } 64 65 // checkStateAccounts cross references a reconstructed state with an expected 66 // account array. 
67 func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) { 68 // Check root availability and state contents 69 state, err := New(root, NewDatabase(db)) 70 if err != nil { 71 t.Fatalf("failed to create state trie at %x: %v", root, err) 72 } 73 if err := checkStateConsistency(db, root); err != nil { 74 t.Fatalf("inconsistent state trie at %x: %v", root, err) 75 } 76 for i, acc := range accounts { 77 if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 { 78 t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance) 79 } 80 if nonce := state.GetNonce(acc.address); nonce != acc.nonce { 81 t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce) 82 } 83 if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) { 84 t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code) 85 } 86 } 87 } 88 89 // checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present. 90 func checkTrieConsistency(db ethdb.Database, root common.Hash) error { 91 if v, _ := db.Get(root[:]); v == nil { 92 return nil // Consider a non existent state consistent. 93 } 94 trie, err := trie.New(root, trie.NewDatabase(db)) 95 if err != nil { 96 return err 97 } 98 it := trie.NodeIterator(nil) 99 for it.Next(true) { 100 } 101 return it.Error() 102 } 103 104 // checkStateConsistency checks that all data of a state root is present. 105 func checkStateConsistency(db ethdb.Database, root common.Hash) error { 106 // Create and iterate a state trie rooted in a sub-node 107 if _, err := db.Get(root.Bytes()); err != nil { 108 return nil // Consider a non existent state consistent. 109 } 110 state, err := New(root, NewDatabase(db)) 111 if err != nil { 112 return err 113 } 114 it := NewNodeIterator(state) 115 for it.Next() { 116 } 117 return it.Error 118 } 119 120 // Tests that an empty state is not scheduled for syncing. 
121 func TestEmptyStateSync(t *testing.T) { 122 empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") 123 db, _ := ethdb.NewMemDatabase() 124 if req := NewStateSync(empty, db).Missing(1); len(req) != 0 { 125 t.Errorf("content requested for empty state: %v", req) 126 } 127 } 128 129 // Tests that given a root hash, a state can sync iteratively on a single thread, 130 // requesting retrieval tasks and returning all of them in one go. 131 func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) } 132 func TestIterativeStateSyncBatched(t *testing.T) { testIterativeStateSync(t, 100) } 133 134 func testIterativeStateSync(t *testing.T, batch int) { 135 // Create a random state to copy 136 srcDb, srcRoot, srcAccounts := makeTestState() 137 138 // Create a destination state and sync with the scheduler 139 dstDb, _ := ethdb.NewMemDatabase() 140 sched := NewStateSync(srcRoot, dstDb) 141 142 queue := append([]common.Hash{}, sched.Missing(batch)...) 143 for len(queue) > 0 { 144 results := make([]trie.SyncResult, len(queue)) 145 for i, hash := range queue { 146 data, err := srcDb.TrieDB().Node(hash) 147 if err != nil { 148 t.Fatalf("failed to retrieve node data for %x", hash) 149 } 150 results[i] = trie.SyncResult{Hash: hash, Data: data} 151 } 152 if _, index, err := sched.Process(results); err != nil { 153 t.Fatalf("failed to process result #%d: %v", index, err) 154 } 155 if index, err := sched.Commit(dstDb); err != nil { 156 t.Fatalf("failed to commit data #%d: %v", index, err) 157 } 158 queue = append(queue[:0], sched.Missing(batch)...) 159 } 160 // Cross check that the two states are in sync 161 checkStateAccounts(t, dstDb, srcRoot, srcAccounts) 162 } 163 164 // Tests that the trie scheduler can correctly reconstruct the state even if only 165 // partial results are returned, and the others sent only later. 
166 func TestIterativeDelayedStateSync(t *testing.T) { 167 // Create a random state to copy 168 srcDb, srcRoot, srcAccounts := makeTestState() 169 170 // Create a destination state and sync with the scheduler 171 dstDb, _ := ethdb.NewMemDatabase() 172 sched := NewStateSync(srcRoot, dstDb) 173 174 queue := append([]common.Hash{}, sched.Missing(0)...) 175 for len(queue) > 0 { 176 // Sync only half of the scheduled nodes 177 results := make([]trie.SyncResult, len(queue)/2+1) 178 for i, hash := range queue[:len(results)] { 179 data, err := srcDb.TrieDB().Node(hash) 180 if err != nil { 181 t.Fatalf("failed to retrieve node data for %x", hash) 182 } 183 results[i] = trie.SyncResult{Hash: hash, Data: data} 184 } 185 if _, index, err := sched.Process(results); err != nil { 186 t.Fatalf("failed to process result #%d: %v", index, err) 187 } 188 if index, err := sched.Commit(dstDb); err != nil { 189 t.Fatalf("failed to commit data #%d: %v", index, err) 190 } 191 queue = append(queue[len(results):], sched.Missing(0)...) 192 } 193 // Cross check that the two states are in sync 194 checkStateAccounts(t, dstDb, srcRoot, srcAccounts) 195 } 196 197 // Tests that given a root hash, a trie can sync iteratively on a single thread, 198 // requesting retrieval tasks and returning all of them in one go, however in a 199 // random order. 
200 func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) } 201 func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomStateSync(t, 100) } 202 203 func testIterativeRandomStateSync(t *testing.T, batch int) { 204 // Create a random state to copy 205 srcDb, srcRoot, srcAccounts := makeTestState() 206 207 // Create a destination state and sync with the scheduler 208 dstDb, _ := ethdb.NewMemDatabase() 209 sched := NewStateSync(srcRoot, dstDb) 210 211 queue := make(map[common.Hash]struct{}) 212 for _, hash := range sched.Missing(batch) { 213 queue[hash] = struct{}{} 214 } 215 for len(queue) > 0 { 216 // Fetch all the queued nodes in a random order 217 results := make([]trie.SyncResult, 0, len(queue)) 218 for hash := range queue { 219 data, err := srcDb.TrieDB().Node(hash) 220 if err != nil { 221 t.Fatalf("failed to retrieve node data for %x", hash) 222 } 223 results = append(results, trie.SyncResult{Hash: hash, Data: data}) 224 } 225 // Feed the retrieved results back and queue new tasks 226 if _, index, err := sched.Process(results); err != nil { 227 t.Fatalf("failed to process result #%d: %v", index, err) 228 } 229 if index, err := sched.Commit(dstDb); err != nil { 230 t.Fatalf("failed to commit data #%d: %v", index, err) 231 } 232 queue = make(map[common.Hash]struct{}) 233 for _, hash := range sched.Missing(batch) { 234 queue[hash] = struct{}{} 235 } 236 } 237 // Cross check that the two states are in sync 238 checkStateAccounts(t, dstDb, srcRoot, srcAccounts) 239 } 240 241 // Tests that the trie scheduler can correctly reconstruct the state even if only 242 // partial results are returned (Even those randomly), others sent only later. 
243 func TestIterativeRandomDelayedStateSync(t *testing.T) { 244 // Create a random state to copy 245 srcDb, srcRoot, srcAccounts := makeTestState() 246 247 // Create a destination state and sync with the scheduler 248 dstDb, _ := ethdb.NewMemDatabase() 249 sched := NewStateSync(srcRoot, dstDb) 250 251 queue := make(map[common.Hash]struct{}) 252 for _, hash := range sched.Missing(0) { 253 queue[hash] = struct{}{} 254 } 255 for len(queue) > 0 { 256 // Sync only half of the scheduled nodes, even those in random order 257 results := make([]trie.SyncResult, 0, len(queue)/2+1) 258 for hash := range queue { 259 delete(queue, hash) 260 261 data, err := srcDb.TrieDB().Node(hash) 262 if err != nil { 263 t.Fatalf("failed to retrieve node data for %x", hash) 264 } 265 results = append(results, trie.SyncResult{Hash: hash, Data: data}) 266 267 if len(results) >= cap(results) { 268 break 269 } 270 } 271 // Feed the retrieved results back and queue new tasks 272 if _, index, err := sched.Process(results); err != nil { 273 t.Fatalf("failed to process result #%d: %v", index, err) 274 } 275 if index, err := sched.Commit(dstDb); err != nil { 276 t.Fatalf("failed to commit data #%d: %v", index, err) 277 } 278 for _, hash := range sched.Missing(0) { 279 queue[hash] = struct{}{} 280 } 281 } 282 // Cross check that the two states are in sync 283 checkStateAccounts(t, dstDb, srcRoot, srcAccounts) 284 } 285 286 // Tests that at any point in time during a sync, only complete sub-tries are in 287 // the database. 288 func TestIncompleteStateSync(t *testing.T) { 289 // Create a random state to copy 290 srcDb, srcRoot, srcAccounts := makeTestState() 291 292 checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot) 293 294 // Create a destination state and sync with the scheduler 295 dstDb, _ := ethdb.NewMemDatabase() 296 sched := NewStateSync(srcRoot, dstDb) 297 298 added := []common.Hash{} 299 queue := append([]common.Hash{}, sched.Missing(1)...) 
300 for len(queue) > 0 { 301 // Fetch a batch of state nodes 302 results := make([]trie.SyncResult, len(queue)) 303 for i, hash := range queue { 304 data, err := srcDb.TrieDB().Node(hash) 305 if err != nil { 306 t.Fatalf("failed to retrieve node data for %x", hash) 307 } 308 results[i] = trie.SyncResult{Hash: hash, Data: data} 309 } 310 // Process each of the state nodes 311 if _, index, err := sched.Process(results); err != nil { 312 t.Fatalf("failed to process result #%d: %v", index, err) 313 } 314 if index, err := sched.Commit(dstDb); err != nil { 315 t.Fatalf("failed to commit data #%d: %v", index, err) 316 } 317 for _, result := range results { 318 added = append(added, result.Hash) 319 } 320 // Check that all known sub-tries added so far are complete or missing entirely. 321 checkSubtries: 322 for _, hash := range added { 323 for _, acc := range srcAccounts { 324 if hash == crypto.Keccak256Hash(acc.code) { 325 continue checkSubtries // skip trie check of code nodes. 326 } 327 } 328 // Can't use checkStateConsistency here because subtrie keys may have odd 329 // length and crash in LeafKey. 330 if err := checkTrieConsistency(dstDb, hash); err != nil { 331 t.Fatalf("state inconsistent: %v", err) 332 } 333 } 334 // Fetch the next batch to retrieve 335 queue = append(queue[:0], sched.Missing(1)...) 336 } 337 // Sanity check that removing any node from the database is detected 338 for _, node := range added[1:] { 339 key := node.Bytes() 340 value, _ := dstDb.Get(key) 341 342 dstDb.Delete(key) 343 if err := checkStateConsistency(dstDb, added[0]); err == nil { 344 t.Fatalf("trie inconsistency not caught, missing: %x", key) 345 } 346 dstDb.Put(key, value) 347 } 348 }