github.com/theQRL/go-zond@v0.1.1/zond/downloader/skeleton_test.go

// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/big"
	"sync/atomic"
	"testing"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/zond/protocols/zond"
)

// hookedBackfiller is a tester backfiller with all interface methods mocked and
// hooked so tests can implement only the things they need.
type hookedBackfiller struct {
	// suspendHook is an optional hook to be called when the filler is requested
	// to be suspended.
	suspendHook func() *types.Header

	// resumeHook is an optional hook to be called when the filler is requested
	// to be resumed.
	resumeHook func()
}

// newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
// essentially acting as a noop.
func newHookedBackfiller() backfiller {
	return new(hookedBackfiller)
}

// suspend requests the backfiller to abort any running full or snap sync
// based on the skeleton chain, as it might be invalid. The backfiller should
// gracefully handle multiple consecutive suspends without a resume, even
// on initial startup.
func (hf *hookedBackfiller) suspend() *types.Header {
	if hf.suspendHook != nil {
		return hf.suspendHook()
	}
	return nil // we don't really care about header cleanups for now
}

// resume requests the backfiller to start running full or snap sync based on
// the skeleton chain, as it has successfully been linked. Appending new heads
// to the end of the chain will not result in suspend/resume cycles.
func (hf *hookedBackfiller) resume() {
	if hf.resumeHook != nil {
		hf.resumeHook()
	}
}
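// The two hooks above are exercised for real by the backfill case in
// TestSkeletonSyncRetrievals below. Purely as an illustrative sketch (not
// invoked by any test), a backfiller that just counts its suspend/resume
// cycles could be wired up like this:
func exampleCountingBackfiller() (backfiller, *atomic.Uint64, *atomic.Uint64) {
	var suspends, resumes atomic.Uint64

	filler := &hookedBackfiller{
		suspendHook: func() *types.Header {
			suspends.Add(1)
			return nil // no last-filled header to report in this sketch
		},
		resumeHook: func() {
			resumes.Add(1)
		},
	}
	return filler, &suspends, &resumes
}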
// skeletonTestPeer is a mock peer that can only serve header requests from a
// pre-prepared header chain (which may be arbitrarily wrong for testing).
//
// Requesting anything else from these peers will hard panic. Note, do *not*
// implement any other methods. We actually want to make sure that the skeleton
// syncer only depends on - and will only ever depend on - header requests.
type skeletonTestPeer struct {
	id      string          // Unique identifier of the mock peer
	headers []*types.Header // Headers to serve when requested

	serve func(origin uint64) []*types.Header // Hook to allow custom responses

	served  atomic.Uint64 // Number of headers served by this peer
	dropped atomic.Uint64 // Flag whether the peer was dropped (stop responding)
}

// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
	return &skeletonTestPeer{
		id:      id,
		headers: headers,
	}
}

// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync
// with, and sets an optional serve hook that can return headers for delivery
// instead of the predefined chain. Useful for emulating malicious behavior that
// would otherwise require dedicated peer types.
func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
	return &skeletonTestPeer{
		id:      id,
		headers: headers,
		serve:   serve,
	}
}
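// As a sketch of how the serve hook is meant to be used (the real malicious
// variants live in the TestSkeletonSyncRetrievals table below): a peer that
// goes silent for one specific origin can return a non-nil empty batch there
// and nil everywhere else, since nil falls back to the canned header chain.
// The function below is illustrative only and not used by the tests.
func exampleSilentPeer(id string, headers []*types.Header, silent uint64) *skeletonTestPeer {
	return newSkeletonTestPeerWithHook(id, headers, func(origin uint64) []*types.Header {
		if origin == silent {
			return []*types.Header{} // non-nil: deliver an empty response for this origin
		}
		return nil // nil: fall back to serving the predefined chain
	})
}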
// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *zond.Response) (*zond.Request, error) {
	// Since skeleton test peers are in-memory mocks, dropping them does not make
	// them inaccessible. As such, check a local `dropped` field to see if the
	// peer has been dropped and should not respond any more.
	if p.dropped.Load() != 0 {
		return nil, errors.New("peer already dropped")
	}
	// Skeleton sync retrieves batches of headers going backward without gaps.
	// This ensures we can follow a clean parent progression without any reorg
	// hiccups. There is no need for any other type of header retrieval, so
	// panic if such a request comes in.
	if !reverse || skip != 0 {
		// Note, if other clients want to do these kinds of requests, it's their
		// problem, it will still work. We just don't want *us* making complicated
		// requests without a very strong reason to.
		panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
	}
	// If the skeleton syncer requests the genesis block, panic. Whilst it could
	// be considered a valid request, our code specifically should not request it
	// ever since we want to link up headers to an existing local chain, which at
	// worst will be the genesis.
	if int64(origin)-int64(amount) < 0 {
		panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
	}
	// To make concurrency easier, the skeleton syncer always requests fixed size
	// batches of headers. Panic if the requested amount is anything other than
	// the configured batch size (apart from the request leading to the genesis).
	if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
		panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
	}
	// Simple reverse header retrieval. Fill from the peer's chain and return.
	// If the tester has a serve hook set, try to use that before falling back
	// to the default behavior.
	var headers []*types.Header
	if p.serve != nil {
		headers = p.serve(origin)
	}
	if headers == nil {
		headers = make([]*types.Header, 0, amount)
		if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
			for i := 0; i < amount; i++ {
				// Consider nil headers as a form of attack and withhold them. Nil
				// cannot be decoded from RLP, so it's not possible to produce an
				// attack by sending/receiving those over zond.
				header := p.headers[int(origin)-i]
				if header == nil {
					continue
				}
				headers = append(headers, header)
			}
		}
	}
	p.served.Add(uint64(len(headers)))

	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &zond.Request{
		Peer: p.id,
	}
	res := &zond.Response{
		Req:  req,
		Res:  (*zond.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error),
	}
	go func() {
		sink <- res
		if err := <-res.Done; err != nil {
			log.Warn("Skeleton test peer response rejected", "err", err)
			p.dropped.Add(1)
		}
	}()
	return req, nil
}

func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
	panic("skeleton sync must not request the remote head")
}

func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *zond.Response) (*zond.Request, error) {
	panic("skeleton sync must not request headers by hash")
}

func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *zond.Response) (*zond.Request, error) {
	panic("skeleton sync must not request block bodies")
}

func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *zond.Response) (*zond.Request, error) {
	panic("skeleton sync must not request receipts")
}
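// For reference, the only request shape the mock accepts is a reverse, gapless
// batch: for origin O and amount A (reverse=true, skip=0), the served headers
// are O, O-1, ..., O-A+1. A minimal sketch of that index walk, assuming the
// origin is present and no headers are withheld (illustrative only):
func exampleReverseBatch(headers []*types.Header, origin uint64, amount int) []*types.Header {
	batch := make([]*types.Header, 0, amount)
	for i := 0; i < amount; i++ {
		batch = append(batch, headers[int(origin)-i]) // walk parent-wards without gaps
	}
	return batch
}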
// Tests various sync initializations based on previous leftovers in the database
// and announced heads.
func TestSkeletonSyncInit(t *testing.T) {
	// Create a few key headers
	var (
		genesis  = &types.Header{Number: big.NewInt(0)}
		block49  = &types.Header{Number: big.NewInt(49)}
		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
	)
	tests := []struct {
		headers  []*types.Header // Database content (beside the genesis)
		oldstate []*subchain     // Old sync state with various interrupted subchains
		head     *types.Header   // New head header to announce to reorg to
		newstate []*subchain     // Expected sync state after the reorg
	}{
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head.
		{
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// Empty database with only the genesis set with a leftover empty sync
		// progress. This is a synthetic case, just for the sake of covering things.
		{
			oldstate: []*subchain{},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// A single leftover subchain is present, older than the new head. The
		// old subchain should be left as is and a new one appended to the sync
		// status.
		{
			oldstate: []*subchain{{Head: 10, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 10, Tail: 5},
			},
		},
		// Multiple leftover subchains are present, older than the new head. The
		// old subchains should be left as is and a new one appended to the sync
		// status.
		{
			oldstate: []*subchain{
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
		},
		// A single leftover subchain is present, newer than the new head. The
		// newer subchain should be deleted and a fresh one created for the head.
		{
			oldstate: []*subchain{{Head: 65, Tail: 60}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// Multiple leftover subchains are present, newer than the new head. The
		// newer subchains should be deleted and a fresh one created for the head.
		{
			oldstate: []*subchain{
				{Head: 75, Tail: 70},
				{Head: 65, Tail: 60},
			},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},

		// Two leftover subchains are present, one fully older and one fully
		// newer than the announced head. The head should delete the newer one,
		// keeping the older one.
		{
			oldstate: []*subchain{
				{Head: 65, Tail: 60},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 10, Tail: 5},
			},
		},
		// Multiple leftover subchains are present, some fully older and some
		// fully newer than the announced head. The head should delete the newer
		// ones, keeping the older ones.
		{
			oldstate: []*subchain{
				{Head: 75, Tail: 70},
				{Head: 65, Tail: 60},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
		},
		// A single leftover subchain is present and the new head is extending
		// it with one more header. We expect the subchain head to be pushed
		// forward.
		{
			headers:  []*types.Header{block49},
			oldstate: []*subchain{{Head: 49, Tail: 5}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 5}},
		},
		// A single leftover subchain is present and although the new head does
		// extend it number-wise, the hash chain does not link up. We expect a
		// new subchain to be created for the dangling head.
		{
			headers:  []*types.Header{block49B},
			oldstate: []*subchain{{Head: 49, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 49, Tail: 5},
			},
		},
		// A single leftover subchain is present. A new head is announced that
		// links into the middle of it, correctly anchoring into an existing
		// header. We expect the old subchain to be truncated and extended with
		// the new head.
		{
			headers:  []*types.Header{block49},
			oldstate: []*subchain{{Head: 100, Tail: 5}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 5}},
		},
		// A single leftover subchain is present. A new head is announced that
		// links into the middle of it, but does not anchor into an existing
		// header. We expect the old subchain to be truncated and a new chain
		// be created for the dangling head.
		{
			headers:  []*types.Header{block49B},
			oldstate: []*subchain{{Head: 100, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 49, Tail: 5},
			},
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()

		rawdb.WriteHeader(db, genesis)
		for _, header := range tt.headers {
			rawdb.WriteSkeletonHeader(db, header)
		}
		if tt.oldstate != nil {
			blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
			rawdb.WriteSkeletonSyncStatus(db, blob)
		}
		// Create a skeleton sync and run a cycle
		wait := make(chan struct{})

		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
		skeleton.syncStarting = func() { close(wait) }
		skeleton.Sync(tt.head, nil, true)

		<-wait
		skeleton.Terminate()

		// Ensure the correct resulting sync status
		var progress skeletonProgress
		json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)

		if len(progress.Subchains) != len(tt.newstate) {
			t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
			continue
		}
		for j := 0; j < len(progress.Subchains); j++ {
			if progress.Subchains[j].Head != tt.newstate[j].Head {
				t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
			}
			if progress.Subchains[j].Tail != tt.newstate[j].Tail {
				t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
			}
		}
	}
}
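// The persistent state that the cases above seed and verify is just the JSON
// encoded skeletonProgress blob in the key-value store. A minimal sketch of
// that round trip, mirroring the setup and verification steps of the loop
// above (illustrative only, not called by the tests):
func exampleSyncStateRoundTrip(subchains []*subchain) skeletonProgress {
	db := rawdb.NewMemoryDatabase()

	// Seed leftover sync state, as the test setup does
	blob, _ := json.Marshal(&skeletonProgress{Subchains: subchains})
	rawdb.WriteSkeletonSyncStatus(db, blob)

	// Read it back, as the verification step does
	var progress skeletonProgress
	json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
	return progress
}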
// Tests that a running skeleton sync can be extended with properly linked up
// headers but not with side chains.
func TestSkeletonSyncExtend(t *testing.T) {
	// Create a few key headers
	var (
		genesis  = &types.Header{Number: big.NewInt(0)}
		block49  = &types.Header{Number: big.NewInt(49)}
		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
		block51  = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
	)
	tests := []struct {
		head     *types.Header // New head header to announce to reorg to
		extend   *types.Header // New head header to announce to extend with
		newstate []*subchain   // Expected sync state after the reorg
		err      error         // Whether extension succeeds or not
	}{
		// Initialize a sync and try to extend it with a subsequent block.
		{
			head:   block49,
			extend: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 49},
			},
		},
		// Initialize a sync and try to extend it with the existing head block.
		{
			head:   block49,
			extend: block49,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
		},
		// Initialize a sync and try to extend it with a sibling block.
		{
			head:   block49,
			extend: block49B,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a number-wise sequential
		// header, but a hash-wise non-linking one.
		{
			head:   block49B,
			extend: block50,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a non-linking future block.
		{
			head:   block49,
			extend: block51,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a past canonical block.
		{
			head:   block50,
			extend: block49,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a past sidechain block.
		{
			head:   block50,
			extend: block49B,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
			},
			err: errReorgDenied,
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()
		rawdb.WriteHeader(db, genesis)

		// Create a skeleton sync and run a cycle
		wait := make(chan struct{})

		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
		skeleton.syncStarting = func() { close(wait) }
		skeleton.Sync(tt.head, nil, true)

		<-wait
		if err := skeleton.Sync(tt.extend, nil, false); err != tt.err {
			t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
		}
		skeleton.Terminate()

		// Ensure the correct resulting sync status
		var progress skeletonProgress
		json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)

		if len(progress.Subchains) != len(tt.newstate) {
			t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
			continue
		}
		for j := 0; j < len(progress.Subchains); j++ {
			if progress.Subchains[j].Head != tt.newstate[j].Head {
				t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
			}
			if progress.Subchains[j].Tail != tt.newstate[j].Tail {
				t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
			}
		}
	}
}
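// The rule the cases above pin down: an extension is accepted only if the new
// header re-announces the current head, or is its direct, hash-linked child;
// anything else fails with errReorgDenied and leaves the sync state untouched.
// A sketch of that acceptance predicate over a (head, extend) header pair
// (illustrative only):
func exampleExtensionAllowed(head, extend *types.Header) bool {
	if extend.Hash() == head.Hash() {
		return true // re-announcing the existing head is accepted as a no-op
	}
	// Otherwise the extension must be the next block number and link by hash
	return extend.Number.Uint64() == head.Number.Uint64()+1 && extend.ParentHash == head.Hash()
}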
// Tests that the skeleton sync correctly retrieves headers from one or more
// peers without duplicates or other strange side effects.
func TestSkeletonSyncRetrievals(t *testing.T) {
	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	// Since skeleton headers don't need to be meaningful, beyond a parent hash
	// progression, create a long fake chain to test with.
	chain := []*types.Header{{Number: big.NewInt(0)}}
	for i := 1; i < 10000; i++ {
		chain = append(chain, &types.Header{
			ParentHash: chain[i-1].Hash(),
			Number:     big.NewInt(int64(i)),
		})
	}
	// Some tests require a forking side chain to trigger corner cases.
	var sidechain []*types.Header
	for i := 0; i < len(chain)/2; i++ { // Fork at block #5000
		sidechain = append(sidechain, chain[i])
	}
	for i := len(chain) / 2; i < len(chain); i++ {
		sidechain = append(sidechain, &types.Header{
			ParentHash: sidechain[i-1].Hash(),
			Number:     big.NewInt(int64(i)),
			Extra:      []byte("B"), // force a different hash
		})
	}
	tests := []struct {
		fill          bool // Whether to run a real backfiller in this test case
		unpredictable bool // Whether to ignore drops/serves due to uncertain packet assignments

		head     *types.Header       // New head header to announce to reorg to
		peers    []*skeletonTestPeer // Initial peer set to start the sync with
		midstate []*subchain         // Expected sync state after initial cycle
		midserve uint64              // Expected number of header retrievals after initial cycle
		middrop  uint64              // Expected number of peers dropped after initial cycle

		newHead  *types.Header     // New header to anoint on top of the old one
		newPeer  *skeletonTestPeer // New peer to join the skeleton syncer
		endstate []*subchain       // Expected sync state after the post-init event
		endserve uint64            // Expected number of header retrievals after the post-init event
		enddrop  uint64            // Expected number of peers dropped after the post-init event
	}{
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head. No peers however, so
		// the sync should be stuck without any progression.
		//
		// When a new peer is added, it should detect the join and fill the headers
		// to the genesis block.
		{
			head:     chain[len(chain)-1],
			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},

			newPeer:  newSkeletonTestPeer("test-peer", chain),
			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			endserve: uint64(len(chain) - 2), // len - head - genesis
		},
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head. With one valid peer,
		// the sync is expected to complete already in the initial round.
		//
		// Adding a second peer should not have any effect.
		{
			head:     chain[len(chain)-1],
			peers:    []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			midserve: uint64(len(chain) - 2), // len - head - genesis

			newPeer:  newSkeletonTestPeer("test-peer-2", chain),
			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			endserve: uint64(len(chain) - 2), // len - head - genesis
		},
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head. With many valid peers,
		// the sync is expected to complete already in the initial round.
		//
		// Adding a new peer should not have any effect.
		{
			head: chain[len(chain)-1],
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("test-peer-1", chain),
				newSkeletonTestPeer("test-peer-2", chain),
				newSkeletonTestPeer("test-peer-3", chain),
			},
			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			midserve: uint64(len(chain) - 2), // len - head - genesis

			newPeer:  newSkeletonTestPeer("test-peer-4", chain),
			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			endserve: uint64(len(chain) - 2), // len - head - genesis
		},
		// This test checks if a peer tries to withhold a header - *on* the sync
		// boundary - instead of sending the requested amount. The malicious short
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100],
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
			middrop:  1,                        // penalize shortened header deliveries

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to withhold a header - *off* the sync
		// boundary - instead of sending the requested amount. The malicious short
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100],
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
			middrop:  1,                        // penalize shortened header deliveries

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to duplicate a header - *on* the sync
		// boundary - instead of sending the correct sequence. The malicious duped
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 2, // len - head - genesis
			middrop:  1,                        // penalize invalid header sequences

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
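		// Accounting note for the malicious-peer cases in this table: with the
		// head at chain[requestHeaders+100], a full sync spans requestHeaders+101
		// headers. The head itself arrives via the announcement rather than a
		// retrieval and the genesis is never requested, giving the baseline of
		// requestHeaders+101-2 served headers; each withheld header subtracts one
		// more. Once the good peer joins, the remaining segment below the stuck
		// tail (100) contributes the 100-1 headers numbered 1 through 99.
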
		// This test checks if a peer tries to duplicate a header - *off* the sync
		// boundary - instead of sending the correct sequence. The malicious duped
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 2, // len - head - genesis
			middrop:  1,                        // penalize invalid header sequences

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to inject a different header - *on*
		// the sync boundary - instead of sending the correct sequence. The bad
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-changer",
					append(
						append(
							append([]*types.Header{}, chain[:99]...),
							&types.Header{
								ParentHash: chain[98].Hash(),
								Number:     big.NewInt(int64(99)),
								GasLimit:   1,
							},
						), chain[100:]...,
					),
				),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 2, // len - head - genesis
			middrop:  1,                        // different set of headers, drop // TODO(karalabe): maybe just diff sync?

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to inject a different header - *off*
		// the sync boundary - instead of sending the correct sequence. The bad
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-changer",
					append(
						append(
							append([]*types.Header{}, chain[:50]...),
							&types.Header{
								ParentHash: chain[49].Hash(),
								Number:     big.NewInt(int64(50)),
								GasLimit:   1,
							},
						), chain[51:]...,
					),
				),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 2, // len - head - genesis
			middrop:  1,                        // different set of headers, drop

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test reproduces a bug caught during review (kudos to @holiman)
		// where a subchain is merged with a previously interrupted one, causing
		// pending data in the scratch space to become "invalid" (since we jump
		// ahead during subchain merge). In that case it is expected to ignore
		// the queued up data instead of trying to process on top of a shifted
		// task set.
		//
		// The test is a bit convoluted since it needs to trigger a concurrency
		// issue. First we sync up an initial chain of 2x512 items. Then announce
		// 2x512+2 as head and delay delivering the head batch to fill the scratch
		// space first. The delivery head should merge with the previous download
		// and the scratch space must not be consumed further.
		{
			head: chain[2*requestHeaders],
			peers: []*skeletonTestPeer{
				newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
					if origin == chain[2*requestHeaders+1].Number.Uint64() {
						time.Sleep(100 * time.Millisecond)
					}
					return nil // Fallback to default behavior, just delayed
				}),
				newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
					if origin == chain[2*requestHeaders+1].Number.Uint64() {
						time.Sleep(100 * time.Millisecond)
					}
					return nil // Fallback to default behavior, just delayed
				}),
			},
			midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
			midserve: 2*requestHeaders - 1, // len - head - genesis

			newHead:  chain[2*requestHeaders+2],
			endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
			endserve: 4 * requestHeaders,
		},
		// This test reproduces a bug caught by (@rjl493456442) where a skeleton
		// header goes missing, causing the sync to get stuck and/or panic.
		//
		// The setup requires a previously successfully synced chain up to a block
		// height N. That results in a single skeleton header (block N) and a single
		// subchain (head N, tail N) being stored on disk.
		//
		// The following step requires a new sync cycle to a new side chain of a
		// height higher than N, with an ancestor lower than N (e.g. ancestor N-2,
		// head N+2). In this scenario, when processing a batch of headers, a link
		// point of N-2 will be found, meaning that N-1 and N have been overwritten.
		//
		// The link event triggers an early exit, noticing that the previous sub-
		// chain is a leftover and deletes it (with its skeleton header N). But
		// since skeleton header N has been overwritten to the new side chain, we
		// end up losing it and creating a gap.
		{
			fill:          true,
			unpredictable: true, // There are both good and bad peers; the bad one may be dropped, and the test is too short for certainty

			head:     chain[len(chain)/2+1], // Sync up until the sidechain common ancestor + 2
			peers:    []*skeletonTestPeer{newSkeletonTestPeer("test-peer-oldchain", chain)},
			midstate: []*subchain{{Head: uint64(len(chain)/2 + 1), Tail: 1}},

			newHead:  sidechain[len(sidechain)/2+3], // Sync up until the sidechain common ancestor + 4
			newPeer:  newSkeletonTestPeer("test-peer-newchain", sidechain),
			endstate: []*subchain{{Head: uint64(len(sidechain)/2 + 3), Tail: uint64(len(chain) / 2)}},
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()

		rawdb.WriteBlock(db, types.NewBlockWithHeader(chain[0]))
		rawdb.WriteReceipts(db, chain[0].Hash(), chain[0].Number.Uint64(), types.Receipts{})

		// Create a peer set to feed headers through
		peerset := newPeerSet()
		for _, peer := range tt.peers {
			peerset.Register(newPeerConnection(peer.id, zond.ETH66, peer, log.New("id", peer.id)))
		}
		// Create a peer dropper to track malicious peers
		dropped := make(map[string]int)
		drop := func(peer string) {
			if p := peerset.Peer(peer); p != nil {
				p.peer.(*skeletonTestPeer).dropped.Add(1)
			}
			peerset.Unregister(peer)
			dropped[peer]++
		}
		// Create a backfiller if we need to run more advanced tests
		filler := newHookedBackfiller()
		if tt.fill {
			var filled *types.Header

			filler = &hookedBackfiller{
				resumeHook: func() {
					var progress skeletonProgress
					json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)

					for progress.Subchains[0].Tail < progress.Subchains[0].Head {
						header := rawdb.ReadSkeletonHeader(db, progress.Subchains[0].Tail)

						rawdb.WriteBlock(db, types.NewBlockWithHeader(header))
						rawdb.WriteReceipts(db, header.Hash(), header.Number.Uint64(), types.Receipts{})

						rawdb.DeleteSkeletonHeader(db, header.Number.Uint64())

						progress.Subchains[0].Tail++
						progress.Subchains[0].Next = header.Hash()
					}
					filled = rawdb.ReadSkeletonHeader(db, progress.Subchains[0].Tail)

					rawdb.WriteBlock(db, types.NewBlockWithHeader(filled))
					rawdb.WriteReceipts(db, filled.Hash(), filled.Number.Uint64(), types.Receipts{})
				},

				suspendHook: func() *types.Header {
					prev := filled
					filled = nil

					return prev
				},
			}
		}
		// Create a skeleton sync and run a cycle
		skeleton := newSkeleton(db, peerset, drop, filler)
		skeleton.Sync(tt.head, nil, true)

		var progress skeletonProgress
		// Wait a bit (bleah) for the initial sync loop to go to idle. This might
		// be either a finish or a never-start, hence there's no event to hook.
		check := func() error {
			if len(progress.Subchains) != len(tt.midstate) {
				return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate))
			}
			for j := 0; j < len(progress.Subchains); j++ {
				if progress.Subchains[j].Head != tt.midstate[j].Head {
					return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head)
				}
				if progress.Subchains[j].Tail != tt.midstate[j].Tail {
					return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail)
				}
			}
			return nil
		}

		waitStart := time.Now()
		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 {
			time.Sleep(waitTime)
			// Check whether the mid state matches the required results
			json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
			if err := check(); err == nil {
				break
			}
		}
		if err := check(); err != nil {
			t.Error(err)
			continue
		}
		if !tt.unpredictable {
			var served uint64
			for _, peer := range tt.peers {
				served += peer.served.Load()
			}
			if served != tt.midserve {
				t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
			}
			var drops uint64
			for _, peer := range tt.peers {
				drops += peer.dropped.Load()
			}
			if drops != tt.middrop {
				t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
			}
		}
		// Apply the post-init events if there are any
		if tt.newHead != nil {
			skeleton.Sync(tt.newHead, nil, true)
		}
		if tt.newPeer != nil {
			if err := peerset.Register(newPeerConnection(tt.newPeer.id, zond.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
				t.Errorf("test %d: failed to register new peer: %v", i, err)
			}
		}
		// Wait a bit (bleah) for the second sync loop to go to idle. This might
		// be either a finish or a never-start, hence there's no event to hook.
		check = func() error {
			if len(progress.Subchains) != len(tt.endstate) {
				return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate))
			}
			for j := 0; j < len(progress.Subchains); j++ {
				if progress.Subchains[j].Head != tt.endstate[j].Head {
					return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head)
				}
				if progress.Subchains[j].Tail != tt.endstate[j].Tail {
					return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail)
				}
			}
			return nil
		}
		waitStart = time.Now()
		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 {
			time.Sleep(waitTime)
			// Check whether the post-init end state matches the required results
			json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
			if err := check(); err == nil {
				break
			}
		}
		if err := check(); err != nil {
			t.Error(err)
			continue
		}
		// Check that the peers served no more headers than we actually needed
		if !tt.unpredictable {
			served := uint64(0)
			for _, peer := range tt.peers {
				served += peer.served.Load()
			}
			if tt.newPeer != nil {
				served += tt.newPeer.served.Load()
			}
			if served != tt.endserve {
				t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
			}
			drops := uint64(0)
			for _, peer := range tt.peers {
				drops += peer.dropped.Load()
			}
			if tt.newPeer != nil {
				drops += tt.newPeer.dropped.Load()
			}
			if drops != tt.enddrop {
				t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.enddrop)
			}
		}
		// Clean up any leftover skeleton sync resources
		skeleton.Terminate()
	}
}
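// Distilled from the tests above, the minimal harness for driving a single
// skeleton sync cycle looks as follows. This is an illustrative sketch only
// (not invoked by the tests); the real tests additionally seed database
// state, inject malicious peers and diff the resulting subchains.
func exampleSyncCycle(head *types.Header, peers []*skeletonTestPeer) skeletonProgress {
	db := rawdb.NewMemoryDatabase()

	peerset := newPeerSet()
	for _, peer := range peers {
		peerset.Register(newPeerConnection(peer.id, zond.ETH66, peer, log.New("id", peer.id)))
	}
	// Gate on the syncStarting callback so Terminate cannot race the cycle,
	// mirroring TestSkeletonSyncInit above
	wait := make(chan struct{})

	skeleton := newSkeleton(db, peerset, nil, newHookedBackfiller())
	skeleton.syncStarting = func() { close(wait) }
	skeleton.Sync(head, nil, true)

	<-wait
	skeleton.Terminate()

	// Return the persisted sync status for assertions
	var progress skeletonProgress
	json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
	return progress
}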