github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/core/state/sync_test.go

package state

import (
	"bytes"
	"math/big"
	"testing"

	"github.com/quickchainproject/quickchain/common"
	"github.com/quickchainproject/quickchain/crypto"
	"github.com/quickchainproject/quickchain/qctdb"
	"github.com/quickchainproject/quickchain/trie"
)

// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address common.Address
	balance *big.Int
	nonce   uint64
	code    []byte
}

// makeTestState creates a sample test state to test node-wise reconstruction.
func makeTestState() (Database, common.Hash, []*testAccount) {
	// Create an empty state
	diskdb, _ := qctdb.NewMemDatabase()
	db := NewDatabase(diskdb)
	state, _ := New(common.Hash{}, db)

	// Fill it with some arbitrary data
	accounts := []*testAccount{}
	for i := byte(0); i < 96; i++ {
		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		acc := &testAccount{address: common.BytesToAddress([]byte{i})}

		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		if i%3 == 0 {
			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
		}
		state.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := state.Commit(false)

	// Return the generated state
	return db, root, accounts
}

// checkStateAccounts cross-references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, db qctdb.Database, root common.Hash, accounts []*testAccount) {
	// Check root availability and state contents
	state, err := New(root, NewDatabase(db))
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	if err := checkStateConsistency(db, root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}
	}
}

// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
func checkTrieConsistency(db qctdb.Database, root common.Hash) error {
	if v, _ := db.Get(root[:]); v == nil {
		return nil // Consider a non-existent state consistent.
	}
	trie, err := trie.New(root, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// checkStateConsistency checks that all data of a state root is present.
func checkStateConsistency(db qctdb.Database, root common.Hash) error {
	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.Get(root.Bytes()); err != nil {
		return nil // Consider a non-existent state consistent.
	}
	state, err := New(root, NewDatabase(db))
	if err != nil {
		return err
	}
	it := NewNodeIterator(state)
	for it.Next() {
	}
	return it.Error
}
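// countStateNodes is an illustrative sketch, not referenced by the tests in
// this file: it walks a state trie with the same NodeIterator used by
// checkStateConsistency above and reports how many nodes were visited. The
// helper name is ours; the New and NewNodeIterator calls mirror the ones
// already used in this file.
func countStateNodes(db qctdb.Database, root common.Hash) (int, error) {
	state, err := New(root, NewDatabase(db))
	if err != nil {
		return 0, err
	}
	nodes := 0
	it := NewNodeIterator(state)
	for it.Next() {
		nodes++
	}
	return nodes, it.Error
}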
// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	db, _ := qctdb.NewMemDatabase()
	if req := NewStateSync(empty, db).Missing(1); len(req) != 0 {
		t.Errorf("content requested for empty state: %v", req)
	}
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }

func testIterativeStateSync(t *testing.T, batch int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb, _ := qctdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := append([]common.Hash{}, sched.Missing(batch)...)
	for len(queue) > 0 {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(batch)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb, _ := qctdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := append([]common.Hash{}, sched.Missing(0)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]trie.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[len(results):], sched.Missing(0)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
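// fetchSyncResults is an illustrative sketch of the retrieval step the tests
// above repeat inline: it resolves each requested hash against the source trie
// database and wraps the raw node data in trie.SyncResult entries. It is not
// referenced by the tests; the helper name is ours, the TrieDB().Node call is
// the same one the tests already use.
func fetchSyncResults(t *testing.T, srcDb Database, hashes []common.Hash) []trie.SyncResult {
	results := make([]trie.SyncResult, len(hashes))
	for i, hash := range hashes {
		data, err := srcDb.TrieDB().Node(hash)
		if err != nil {
			t.Fatalf("failed to retrieve node data for %x", hash)
		}
		results[i] = trie.SyncResult{Hash: hash, Data: data}
	}
	return results
}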
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, batch int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb, _ := qctdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(batch) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]trie.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(batch) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (and in random order), with the others sent only
// later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb, _ := qctdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(0) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]trie.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, hash := range sched.Missing(0) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
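// syncStateFully is an illustrative sketch of the scheduling loop shared by the
// iterative tests above: it drives a fresh scheduler until Missing reports no
// outstanding hashes, using the fetchSyncResults sketch for the retrieval step.
// Not used by the tests; it assumes the qctdb.Database interface satisfies what
// NewStateSync and Commit expect, as the concrete memory database above does.
func syncStateFully(t *testing.T, srcDb Database, dstDb qctdb.Database, root common.Hash, batch int) {
	sched := NewStateSync(root, dstDb)
	for queue := sched.Missing(batch); len(queue) > 0; queue = sched.Missing(batch) {
		results := fetchSyncResults(t, srcDb, queue)
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
	}
}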
// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	checkTrieConsistency(srcDb.TrieDB().DiskDB().(qctdb.Database), srcRoot)

	// Create a destination state and sync with the scheduler
	dstDb, _ := qctdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	added := []common.Hash{}
	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete or missing entirely.
	checkSubtries:
		for _, hash := range added {
			for _, acc := range srcAccounts {
				if hash == crypto.Keccak256Hash(acc.code) {
					continue checkSubtries // skip trie check of code nodes.
				}
			}
			// Can't use checkStateConsistency here because subtrie keys may have odd
			// length and crash in LeafKey.
			if err := checkTrieConsistency(dstDb, hash); err != nil {
				t.Fatalf("state inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		queue = append(queue[:0], sched.Missing(1)...)
	}
	// Sanity check that removing any node from the database is detected. The root
	// itself (added[0]) is skipped, since checkStateConsistency treats a missing
	// root as a non-existent, consistent state.
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := dstDb.Get(key)

		dstDb.Delete(key)
		if err := checkStateConsistency(dstDb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		dstDb.Put(key, value)
	}
}
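// checkAccountCodeHashes is an illustrative sketch complementing
// checkStateAccounts: it recomputes each expected account's code hash with
// crypto.Keccak256Hash, the same way makeTestState assigns code above, and
// compares it against the code retrieved from the reconstructed state. Not
// used by the tests; the helper name is ours.
func checkAccountCodeHashes(t *testing.T, db qctdb.Database, root common.Hash, accounts []*testAccount) {
	state, err := New(root, NewDatabase(db))
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	for i, acc := range accounts {
		if len(acc.code) == 0 {
			continue // accounts without code have nothing to verify
		}
		want := crypto.Keccak256Hash(acc.code)
		if have := crypto.Keccak256Hash(state.GetCode(acc.address)); have != want {
			t.Errorf("account %d: code hash mismatch: have %x, want %x", i, have, want)
		}
	}
}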