// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"testing"

	"github.com/aswedchain/aswed/common"
	"github.com/aswedchain/aswed/crypto"
	"github.com/aswedchain/aswed/ethdb/memorydb"
)

// makeTestTrie creates a sample test trie to test node-wise reconstruction.
func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
	// Create an empty trie
	triedb := NewDatabase(memorydb.New())
	trie, _ := NewSecure(common.Hash{}, triedb)

	// Fill it with some arbitrary data
	content := make(map[string][]byte)
	for i := byte(0); i < 255; i++ {
		// Map the same data under multiple keys
		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		// Add some other data to inflate the trie
		for j := byte(3); j < 13; j++ {
			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
			content[string(key)] = val
			trie.Update(key, val)
		}
	}
	trie.Commit(nil)

	// Return the generated trie
	return triedb, trie, content
}

// checkTrieContents cross-references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	trie, err := NewSecure(common.BytesToHash(root), db)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}

// checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db *Database, root common.Hash) error {
	// Create and iterate a trie rooted in a subnode
	trie, err := NewSecure(root, db)
	if err != nil {
		return nil // Consider a non-existent state consistent
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// Tests that an empty trie is not scheduled for syncing.
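// Both the zero hash and emptyRoot (the canonical hash of an empty trie) name
// an empty trie, so neither variant should yield any retrieval tasks.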
func TestEmptySync(t *testing.T) {
	dbA := NewDatabase(memorydb.New())
	dbB := NewDatabase(memorydb.New())
	emptyA, _ := New(common.Hash{}, dbA)
	emptyB, _ := New(emptyRoot, dbB)

	for i, trie := range []*Trie{emptyA, emptyB} {
		sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()))
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
		}
	}
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeSyncIndividual(t *testing.T)       { testIterativeSync(t, 1, false) }
func TestIterativeSyncBatched(t *testing.T)          { testIterativeSync(t, 100, false) }
func TestIterativeSyncIndividualByPath(t *testing.T) { testIterativeSync(t, 1, true) }
func TestIterativeSyncBatchedByPath(t *testing.T)    { testIterativeSync(t, 100, true) }

func testIterativeSync(t *testing.T, count int, bypath bool) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	nodes, paths, codes := sched.Missing(count)
	var (
		hashQueue []common.Hash
		pathQueue []SyncPath
	)
	if !bypath {
		hashQueue = append(append(hashQueue[:0], nodes...), codes...)
	} else {
		hashQueue = append(hashQueue[:0], codes...)
		pathQueue = append(pathQueue[:0], paths...)
	}
	for len(hashQueue)+len(pathQueue) > 0 {
		results := make([]SyncResult, len(hashQueue)+len(pathQueue))
		for i, hash := range hashQueue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for hash %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		for i, path := range pathQueue {
			data, _, err := srcTrie.TryGetNode(path[0])
			if err != nil {
				t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
			}
			results[len(hashQueue)+i] = SyncResult{crypto.Keccak256Hash(data), data}
		}
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		nodes, paths, codes = sched.Missing(count)
		if !bypath {
			hashQueue = append(append(hashQueue[:0], nodes...), codes...)
		} else {
			hashQueue = append(hashQueue[:0], codes...)
			pathQueue = append(pathQueue[:0], paths...)
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
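// Only the first half of each scheduled batch is answered per round, mimicking
// slow peers that trickle in their responses.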
func TestIterativeDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	nodes, _, codes := sched.Missing(10000)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		nodes, _, codes = sched.Missing(10000)
		queue = append(append(queue[len(results):], nodes...), codes...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, though in a
// random order.
func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1) }
func TestIterativeRandomSyncBatched(t *testing.T)    { testIterativeRandomSync(t, 100) }

func testIterativeRandomSync(t *testing.T, count int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(count)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		queue = make(map[common.Hash]struct{})
		nodes, _, codes = sched.Missing(count)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, even those in a random order, with the others
// sent only later.
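// Half of the queued batch is delivered per round; since the queue is a map,
// Go's randomized map iteration also shuffles which half that is.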
func TestIterativeRandomDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(10000)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, picked in a random order
		results := make([]SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			delete(queue, result.Hash)
		}
		nodes, _, codes = sched.Missing(10000)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that a trie sync will not request nodes multiple times, even if they
// are referenced from multiple places in the trie.
func TestDuplicateAvoidanceSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	nodes, _, codes := sched.Missing(0)
	queue := append(append([]common.Hash{}, nodes...), codes...)
	requested := make(map[common.Hash]struct{})

	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			if _, ok := requested[hash]; ok {
				t.Errorf("hash %x already requested once", hash)
			}
			requested[hash] = struct{}{}

			results[i] = SyncResult{hash, data}
		}
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		nodes, _, codes = sched.Missing(0)
		queue = append(append(queue[:0], nodes...), codes...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
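// Every node committed so far must already root a fully present sub-trie, and
// the teardown double-checks that by deleting committed nodes one at a time.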
func TestIncompleteSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, _ := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	var added []common.Hash

	nodes, _, codes := sched.Missing(1)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Fetch a batch of trie nodes
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		// Process each of the trie nodes
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries in the synced trie are complete
		for _, root := range added {
			if err := checkTrieConsistency(triedb, root); err != nil {
				t.Fatalf("trie inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		nodes, _, codes = sched.Missing(1)
		queue = append(append(queue[:0], nodes...), codes...)
	}
	// Sanity check that removing any node from the database is detected
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := diskdb.Get(key)

		diskdb.Delete(key)
		if err := checkTrieConsistency(triedb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		diskdb.Put(key, value)
	}
}

// Tests that trie nodes get scheduled lexicographically when they have the same
// depth.
func TestSyncOrdering(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler, tracking the requests
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	nodes, paths, _ := sched.Missing(1)
	queue := append([]common.Hash{}, nodes...)
	reqs := append([]SyncPath{}, paths...)

	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		nodes, paths, _ = sched.Missing(1)
		queue = append(queue[:0], nodes...)
		reqs = append(reqs, paths...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)

	// Check that the trie nodes have been requested path-ordered
	for i := 0; i < len(reqs)-1; i++ {
		if len(reqs[i]) > 1 || len(reqs[i+1]) > 1 {
			// In the case of the trie tests, there's no storage so the tuples
			// must always be single items. 2-tuples should be tested in state.
			t.Errorf("Invalid request tuples: len(%v) or len(%v) > 1", reqs[i], reqs[i+1])
		}
		if bytes.Compare(compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0])) > 0 {
			t.Errorf("Invalid request order: %v before %v", compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0]))
		}
	}
}
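
// All of the tests above drive the scheduler through the same basic loop: ask
// Missing for retrieval tasks, fetch the data from the source, feed it back
// via Process and flush committable nodes with Commit. The tests then differ
// only in how they slice, reorder or delay the results before processing.
// Below is a minimal sketch of that canonical flow factored into a helper;
// syncAll is hypothetical and not part of the original suite, and it assumes
// the hash-based (not path-based) retrieval path.
func syncAll(t *testing.T, srcDb *Database, root common.Hash, diskdb *memorydb.Database) {
	t.Helper()

	sched := NewSync(root, diskdb, nil, NewSyncBloom(1, diskdb))

	// Ask for every currently missing trie node and contract code hash.
	nodes, _, codes := sched.Missing(0)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Fetch the scheduled items from the source database.
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		// Feed the results back into the scheduler and persist whatever
		// became committable as a consequence.
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		// Queue up whatever became reachable through the new nodes.
		nodes, _, codes = sched.Missing(0)
		queue = append(append(queue[:0], nodes...), codes...)
	}
}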