// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/big"
	"os"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/log"
)

// hookedBackfiller is a tester backfiller with all interface methods mocked and
// hooked so tests can implement only the things they need.
type hookedBackfiller struct {
	// suspendHook is an optional hook to be called when the filler is requested
	// to be suspended.
	suspendHook func()

	// resumeHook is an optional hook to be called when the filler is requested
	// to be resumed.
	resumeHook func()
}

// newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
// essentially acting as a noop.
func newHookedBackfiller() backfiller {
	return new(hookedBackfiller)
}

// suspend requests the backfiller to abort any running full or snap sync
// based on the skeleton chain as it might be invalid. The backfiller should
// gracefully handle multiple consecutive suspends without a resume, even
// on initial startup.
func (hf *hookedBackfiller) suspend() {
	if hf.suspendHook != nil {
		hf.suspendHook()
	}
}

// resume requests the backfiller to start running fill or snap sync based on
// the skeleton chain as it has successfully been linked. Appending new heads
// to the end of the chain will not result in suspend/resume cycles.
func (hf *hookedBackfiller) resume() {
	if hf.resumeHook != nil {
		hf.resumeHook()
	}
}

// skeletonTestPeer is a mock peer that can only serve header requests from a
// pre-prepared header chain (which may be arbitrarily wrong for testing).
//
// Requesting anything else from these peers will hard panic. Note, do *not*
// implement any other methods. We actually want to make sure that the skeleton
// syncer only depends on - and will only ever do so - on header requests.
type skeletonTestPeer struct {
	id      string          // Unique identifier of the mock peer
	headers []*types.Header // Headers to serve when requested

	serve func(origin uint64) []*types.Header // Hook to allow custom responses

	served  uint64 // Number of headers served by this peer
	dropped uint64 // Flag whether the peer was dropped (stop responding)
}

// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
	return &skeletonTestPeer{
		id:      id,
		headers: headers,
	}
}

// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync
// with, and sets an optional serve hook that can return headers for delivery
// instead of the predefined chain.
// Useful for emulating malicious behavior that would
// otherwise require dedicated peer types.
func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
	return &skeletonTestPeer{
		id:      id,
		headers: headers,
		serve:   serve,
	}
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Since skeleton test peers are in-memory mocks, dropping them does not make
	// them inaccessible. As such, check a local `dropped` field to see if the
	// peer has been dropped and should not respond any more.
	if atomic.LoadUint64(&p.dropped) != 0 {
		return nil, errors.New("peer already dropped")
	}
	// Skeleton sync retrieves batches of headers going backward without gaps.
	// This ensures we can follow a clean parent progression without any reorg
	// hiccups. There is no need for any other type of header retrieval, so do
	// panic if there's such a request.
	if !reverse || skip != 0 {
		// Note, if other clients want to do these kinds of requests, it's their
		// problem, it will still work. We just don't want *us* making complicated
		// requests without a very strong reason to.
		panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
	}
	// If the skeleton syncer requests the genesis block, panic. Whilst it could
	// be considered a valid request, our code specifically should not request it
	// ever since we want to link up headers to an existing local chain, which at
	// worst will be the genesis.
	if int64(origin)-int64(amount) < 0 {
		panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
	}
	// To make concurrency easier, the skeleton syncer always requests fixed size
	// batches of headers. Panic if the peer is requested an amount other than the
	// configured batch size (apart from the request leading to the genesis).
	if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
		panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
	}
	// Simple reverse header retrieval. Fill from the peer's chain and return.
	// If the tester has a serve hook set, try to use that before falling back
	// to the default behavior.
	var headers []*types.Header
	if p.serve != nil {
		headers = p.serve(origin)
	}
	if headers == nil {
		headers = make([]*types.Header, 0, amount)
		if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
			for i := 0; i < amount; i++ {
				// Consider nil headers as a form of attack and withhold them. Nil
				// cannot be decoded from RLP, so it's not possible to produce an
				// attack by sending/receiving those over eth.
				header := p.headers[int(origin)-i]
				if header == nil {
					continue
				}
				headers = append(headers, header)
			}
		}
	}
	atomic.AddUint64(&p.served, uint64(len(headers)))

	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: p.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error),
	}
	go func() {
		sink <- res
		if err := <-res.Done; err != nil {
			log.Warn("Skeleton test peer response rejected", "err", err)
			atomic.AddUint64(&p.dropped, 1)
		}
	}()
	return req, nil
}

func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
	panic("skeleton sync must not request the remote head")
}

func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request headers by hash")
}

func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request block bodies")
}

func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request receipts")
}

// Tests various sync initializations based on previous leftovers in the database
// and announced heads.
209 func TestSkeletonSyncInit(t *testing.T) { 210 // Create a few key headers 211 var ( 212 genesis = &types.Header{Number: big.NewInt(0)} 213 block49 = &types.Header{Number: big.NewInt(49)} 214 block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")} 215 block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()} 216 ) 217 tests := []struct { 218 headers []*types.Header // Database content (beside the genesis) 219 oldstate []*subchain // Old sync state with various interrupted subchains 220 head *types.Header // New head header to announce to reorg to 221 newstate []*subchain // Expected sync state after the reorg 222 }{ 223 // Completely empty database with only the genesis set. The sync is expected 224 // to create a single subchain with the requested head. 225 { 226 head: block50, 227 newstate: []*subchain{{Head: 50, Tail: 50}}, 228 }, 229 // Empty database with only the genesis set with a leftover empty sync 230 // progess. This is a synthetic case, just for the sake of covering things. 231 { 232 oldstate: []*subchain{}, 233 head: block50, 234 newstate: []*subchain{{Head: 50, Tail: 50}}, 235 }, 236 // A single leftover subchain is present, older than the new head. The 237 // old subchain should be left as is and a new one appended to the sync 238 // status. 239 { 240 oldstate: []*subchain{{Head: 10, Tail: 5}}, 241 head: block50, 242 newstate: []*subchain{ 243 {Head: 50, Tail: 50}, 244 {Head: 10, Tail: 5}, 245 }, 246 }, 247 // Multiple leftover subchains are present, older than the new head. The 248 // old subchains should be left as is and a new one appended to the sync 249 // status. 250 { 251 oldstate: []*subchain{ 252 {Head: 20, Tail: 15}, 253 {Head: 10, Tail: 5}, 254 }, 255 head: block50, 256 newstate: []*subchain{ 257 {Head: 50, Tail: 50}, 258 {Head: 20, Tail: 15}, 259 {Head: 10, Tail: 5}, 260 }, 261 }, 262 // A single leftover subchain is present, newer than the new head. 
The 263 // newer subchain should be deleted and a fresh one created for the head. 264 { 265 oldstate: []*subchain{{Head: 65, Tail: 60}}, 266 head: block50, 267 newstate: []*subchain{{Head: 50, Tail: 50}}, 268 }, 269 // Multiple leftover subchain is present, newer than the new head. The 270 // newer subchains should be deleted and a fresh one created for the head. 271 { 272 oldstate: []*subchain{ 273 {Head: 75, Tail: 70}, 274 {Head: 65, Tail: 60}, 275 }, 276 head: block50, 277 newstate: []*subchain{{Head: 50, Tail: 50}}, 278 }, 279 280 // Two leftover subchains are present, one fully older and one fully 281 // newer than the announced head. The head should delete the newer one, 282 // keeping the older one. 283 { 284 oldstate: []*subchain{ 285 {Head: 65, Tail: 60}, 286 {Head: 10, Tail: 5}, 287 }, 288 head: block50, 289 newstate: []*subchain{ 290 {Head: 50, Tail: 50}, 291 {Head: 10, Tail: 5}, 292 }, 293 }, 294 // Multiple leftover subchains are present, some fully older and some 295 // fully newer than the announced head. The head should delete the newer 296 // ones, keeping the older ones. 297 { 298 oldstate: []*subchain{ 299 {Head: 75, Tail: 70}, 300 {Head: 65, Tail: 60}, 301 {Head: 20, Tail: 15}, 302 {Head: 10, Tail: 5}, 303 }, 304 head: block50, 305 newstate: []*subchain{ 306 {Head: 50, Tail: 50}, 307 {Head: 20, Tail: 15}, 308 {Head: 10, Tail: 5}, 309 }, 310 }, 311 // A single leftover subchain is present and the new head is extending 312 // it with one more header. We expect the subchain head to be pushed 313 // forward. 314 { 315 headers: []*types.Header{block49}, 316 oldstate: []*subchain{{Head: 49, Tail: 5}}, 317 head: block50, 318 newstate: []*subchain{{Head: 50, Tail: 5}}, 319 }, 320 // A single leftover subchain is present and although the new head does 321 // extend it number wise, the hash chain does not link up. We expect a 322 // new subchain to be created for the dangling head. 
323 { 324 headers: []*types.Header{block49B}, 325 oldstate: []*subchain{{Head: 49, Tail: 5}}, 326 head: block50, 327 newstate: []*subchain{ 328 {Head: 50, Tail: 50}, 329 {Head: 49, Tail: 5}, 330 }, 331 }, 332 // A single leftover subchain is present. A new head is announced that 333 // links into the middle of it, correctly anchoring into an existing 334 // header. We expect the old subchain to be truncated and extended with 335 // the new head. 336 { 337 headers: []*types.Header{block49}, 338 oldstate: []*subchain{{Head: 100, Tail: 5}}, 339 head: block50, 340 newstate: []*subchain{{Head: 50, Tail: 5}}, 341 }, 342 // A single leftover subchain is present. A new head is announced that 343 // links into the middle of it, but does not anchor into an existing 344 // header. We expect the old subchain to be truncated and a new chain 345 // be created for the dangling head. 346 { 347 headers: []*types.Header{block49B}, 348 oldstate: []*subchain{{Head: 100, Tail: 5}}, 349 head: block50, 350 newstate: []*subchain{ 351 {Head: 50, Tail: 50}, 352 {Head: 49, Tail: 5}, 353 }, 354 }, 355 } 356 for i, tt := range tests { 357 // Create a fresh database and initialize it with the starting state 358 db := rawdb.NewMemoryDatabase() 359 360 rawdb.WriteHeader(db, genesis) 361 for _, header := range tt.headers { 362 rawdb.WriteSkeletonHeader(db, header) 363 } 364 if tt.oldstate != nil { 365 blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate}) 366 rawdb.WriteSkeletonSyncStatus(db, blob) 367 } 368 // Create a skeleton sync and run a cycle 369 wait := make(chan struct{}) 370 371 skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller()) 372 skeleton.syncStarting = func() { close(wait) } 373 skeleton.Sync(tt.head, true) 374 375 <-wait 376 skeleton.Terminate() 377 378 // Ensure the correct resulting sync status 379 var progress skeletonProgress 380 json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) 381 382 if len(progress.Subchains) != len(tt.newstate) { 383 
t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate)) 384 continue 385 } 386 for j := 0; j < len(progress.Subchains); j++ { 387 if progress.Subchains[j].Head != tt.newstate[j].Head { 388 t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head) 389 } 390 if progress.Subchains[j].Tail != tt.newstate[j].Tail { 391 t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail) 392 } 393 } 394 } 395 } 396 397 // Tests that a running skeleton sync can be extended with properly linked up 398 // headers but not with side chains. 399 func TestSkeletonSyncExtend(t *testing.T) { 400 // Create a few key headers 401 var ( 402 genesis = &types.Header{Number: big.NewInt(0)} 403 block49 = &types.Header{Number: big.NewInt(49)} 404 block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")} 405 block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()} 406 block51 = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()} 407 ) 408 tests := []struct { 409 head *types.Header // New head header to announce to reorg to 410 extend *types.Header // New head header to announce to extend with 411 newstate []*subchain // Expected sync state after the reorg 412 err error // Whether extension succeeds or not 413 }{ 414 // Initialize a sync and try to extend it with a subsequent block. 415 { 416 head: block49, 417 extend: block50, 418 newstate: []*subchain{ 419 {Head: 50, Tail: 49}, 420 }, 421 }, 422 // Initialize a sync and try to extend it with the existing head block. 423 { 424 head: block49, 425 extend: block49, 426 newstate: []*subchain{ 427 {Head: 49, Tail: 49}, 428 }, 429 err: errReorgDenied, 430 }, 431 // Initialize a sync and try to extend it with a sibling block. 
432 { 433 head: block49, 434 extend: block49B, 435 newstate: []*subchain{ 436 {Head: 49, Tail: 49}, 437 }, 438 err: errReorgDenied, 439 }, 440 // Initialize a sync and try to extend it with a number-wise sequential 441 // header, but a hash wise non-linking one. 442 { 443 head: block49B, 444 extend: block50, 445 newstate: []*subchain{ 446 {Head: 49, Tail: 49}, 447 }, 448 err: errReorgDenied, 449 }, 450 // Initialize a sync and try to extend it with a non-linking future block. 451 { 452 head: block49, 453 extend: block51, 454 newstate: []*subchain{ 455 {Head: 49, Tail: 49}, 456 }, 457 err: errReorgDenied, 458 }, 459 // Initialize a sync and try to extend it with a past canonical block. 460 { 461 head: block50, 462 extend: block49, 463 newstate: []*subchain{ 464 {Head: 50, Tail: 50}, 465 }, 466 err: errReorgDenied, 467 }, 468 // Initialize a sync and try to extend it with a past sidechain block. 469 { 470 head: block50, 471 extend: block49B, 472 newstate: []*subchain{ 473 {Head: 50, Tail: 50}, 474 }, 475 err: errReorgDenied, 476 }, 477 } 478 for i, tt := range tests { 479 // Create a fresh database and initialize it with the starting state 480 db := rawdb.NewMemoryDatabase() 481 rawdb.WriteHeader(db, genesis) 482 483 // Create a skeleton sync and run a cycle 484 wait := make(chan struct{}) 485 486 skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller()) 487 skeleton.syncStarting = func() { close(wait) } 488 skeleton.Sync(tt.head, true) 489 490 <-wait 491 if err := skeleton.Sync(tt.extend, false); err != tt.err { 492 t.Errorf("extension failure mismatch: have %v, want %v", err, tt.err) 493 } 494 skeleton.Terminate() 495 496 // Ensure the correct resulting sync status 497 var progress skeletonProgress 498 json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) 499 500 if len(progress.Subchains) != len(tt.newstate) { 501 t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate)) 502 continue 503 } 504 
for j := 0; j < len(progress.Subchains); j++ { 505 if progress.Subchains[j].Head != tt.newstate[j].Head { 506 t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head) 507 } 508 if progress.Subchains[j].Tail != tt.newstate[j].Tail { 509 t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail) 510 } 511 } 512 } 513 } 514 515 // Tests that the skeleton sync correctly retrieves headers from one or more 516 // peers without duplicates or other strange side effects. 517 func TestSkeletonSyncRetrievals(t *testing.T) { 518 log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) 519 520 // Since skeleton headers don't need to be meaningful, beyond a parent hash 521 // progression, create a long fake chain to test with. 522 chain := []*types.Header{{Number: big.NewInt(0)}} 523 for i := 1; i < 10000; i++ { 524 chain = append(chain, &types.Header{ 525 ParentHash: chain[i-1].Hash(), 526 Number: big.NewInt(int64(i)), 527 }) 528 } 529 tests := []struct { 530 headers []*types.Header // Database content (beside the genesis) 531 oldstate []*subchain // Old sync state with various interrupted subchains 532 533 head *types.Header // New head header to announce to reorg to 534 peers []*skeletonTestPeer // Initial peer set to start the sync with 535 midstate []*subchain // Expected sync state after initial cycle 536 midserve uint64 // Expected number of header retrievals after initial cycle 537 middrop uint64 // Expectd number of peers dropped after initial cycle 538 539 newHead *types.Header // New header to annount on top of the old one 540 newPeer *skeletonTestPeer // New peer to join the skeleton syncer 541 endstate []*subchain // Expected sync state after the post-init event 542 endserve uint64 // Expected number of header retrievals after the post-init event 543 enddrop uint64 // Expectd number of 
peers dropped after the post-init event 544 }{ 545 // Completely empty database with only the genesis set. The sync is expected 546 // to create a single subchain with the requested head. No peers however, so 547 // the sync should be stuck without any progression. 548 // 549 // When a new peer is added, it should detect the join and fill the headers 550 // to the genesis block. 551 { 552 head: chain[len(chain)-1], 553 midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}}, 554 555 newPeer: newSkeletonTestPeer("test-peer", chain), 556 endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, 557 endserve: uint64(len(chain) - 2), // len - head - genesis 558 }, 559 // Completely empty database with only the genesis set. The sync is expected 560 // to create a single subchain with the requested head. With one valid peer, 561 // the sync is expected to complete already in the initial round. 562 // 563 // Adding a second peer should not have any effect. 564 { 565 head: chain[len(chain)-1], 566 peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)}, 567 midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, 568 midserve: uint64(len(chain) - 2), // len - head - genesis 569 570 newPeer: newSkeletonTestPeer("test-peer-2", chain), 571 endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, 572 endserve: uint64(len(chain) - 2), // len - head - genesis 573 }, 574 // Completely empty database with only the genesis set. The sync is expected 575 // to create a single subchain with the requested head. With many valid peers, 576 // the sync is expected to complete already in the initial round. 577 // 578 // Adding a new peer should not have any effect. 
579 { 580 head: chain[len(chain)-1], 581 peers: []*skeletonTestPeer{ 582 newSkeletonTestPeer("test-peer-1", chain), 583 newSkeletonTestPeer("test-peer-2", chain), 584 newSkeletonTestPeer("test-peer-3", chain), 585 }, 586 midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, 587 midserve: uint64(len(chain) - 2), // len - head - genesis 588 589 newPeer: newSkeletonTestPeer("test-peer-4", chain), 590 endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, 591 endserve: uint64(len(chain) - 2), // len - head - genesis 592 }, 593 // This test checks if a peer tries to withhold a header - *on* the sync 594 // boundary - instead of sending the requested amount. The malicious short 595 // package should not be accepted. 596 // 597 // Joining with a new peer should however unblock the sync. 598 { 599 head: chain[requestHeaders+100], 600 peers: []*skeletonTestPeer{ 601 newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)), 602 }, 603 midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, 604 midserve: requestHeaders + 101 - 3, // len - head - genesis - missing 605 middrop: 1, // penalize shortened header deliveries 606 607 newPeer: newSkeletonTestPeer("good-peer", chain), 608 endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, 609 endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis 610 enddrop: 1, // no new drops 611 }, 612 // This test checks if a peer tries to withhold a header - *off* the sync 613 // boundary - instead of sending the requested amount. The malicious short 614 // package should not be accepted. 615 // 616 // Joining with a new peer should however unblock the sync. 
617 { 618 head: chain[requestHeaders+100], 619 peers: []*skeletonTestPeer{ 620 newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)), 621 }, 622 midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, 623 midserve: requestHeaders + 101 - 3, // len - head - genesis - missing 624 middrop: 1, // penalize shortened header deliveries 625 626 newPeer: newSkeletonTestPeer("good-peer", chain), 627 endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, 628 endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis 629 enddrop: 1, // no new drops 630 }, 631 // This test checks if a peer tries to duplicate a header - *on* the sync 632 // boundary - instead of sending the correct sequence. The malicious duped 633 // package should not be accepted. 634 // 635 // Joining with a new peer should however unblock the sync. 636 { 637 head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary 638 peers: []*skeletonTestPeer{ 639 newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)), 640 }, 641 midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, 642 midserve: requestHeaders + 101 - 2, // len - head - genesis 643 middrop: 1, // penalize invalid header sequences 644 645 newPeer: newSkeletonTestPeer("good-peer", chain), 646 endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, 647 endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis 648 enddrop: 1, // no new drops 649 }, 650 // This test checks if a peer tries to duplicate a header - *off* the sync 651 // boundary - instead of sending the correct sequence. The malicious duped 652 // package should not be accepted. 653 // 654 // Joining with a new peer should however unblock the sync. 
655 { 656 head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary 657 peers: []*skeletonTestPeer{ 658 newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)), 659 }, 660 midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, 661 midserve: requestHeaders + 101 - 2, // len - head - genesis 662 middrop: 1, // penalize invalid header sequences 663 664 newPeer: newSkeletonTestPeer("good-peer", chain), 665 endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, 666 endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis 667 enddrop: 1, // no new drops 668 }, 669 // This test checks if a peer tries to inject a different header - *on* 670 // the sync boundary - instead of sending the correct sequence. The bad 671 // package should not be accepted. 672 // 673 // Joining with a new peer should however unblock the sync. 674 { 675 head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary 676 peers: []*skeletonTestPeer{ 677 newSkeletonTestPeer("header-changer", 678 append( 679 append( 680 append([]*types.Header{}, chain[:99]...), 681 &types.Header{ 682 ParentHash: chain[98].Hash(), 683 Number: big.NewInt(int64(99)), 684 GasLimit: 1, 685 }, 686 ), chain[100:]..., 687 ), 688 ), 689 }, 690 midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, 691 midserve: requestHeaders + 101 - 2, // len - head - genesis 692 middrop: 1, // different set of headers, drop // TODO(karalabe): maybe just diff sync? 693 694 newPeer: newSkeletonTestPeer("good-peer", chain), 695 endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, 696 endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis 697 enddrop: 1, // no new drops 698 }, 699 // This test checks if a peer tries to inject a different header - *off* 700 // the sync boundary - instead of sending the correct sequence. 
The bad 701 // package should not be accepted. 702 // 703 // Joining with a new peer should however unblock the sync. 704 { 705 head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary 706 peers: []*skeletonTestPeer{ 707 newSkeletonTestPeer("header-changer", 708 append( 709 append( 710 append([]*types.Header{}, chain[:50]...), 711 &types.Header{ 712 ParentHash: chain[49].Hash(), 713 Number: big.NewInt(int64(50)), 714 GasLimit: 1, 715 }, 716 ), chain[51:]..., 717 ), 718 ), 719 }, 720 midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, 721 midserve: requestHeaders + 101 - 2, // len - head - genesis 722 middrop: 1, // different set of headers, drop 723 724 newPeer: newSkeletonTestPeer("good-peer", chain), 725 endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, 726 endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis 727 enddrop: 1, // no new drops 728 }, 729 // This test reproduces a bug caught during review (kudos to @holiman) 730 // where a subchain is merged with a previously interrupted one, causing 731 // pending data in the scratch space to become "invalid" (since we jump 732 // ahead during subchain merge). In that case it is expected to ignore 733 // the queued up data instead of trying to process on top of a shifted 734 // task set. 735 // 736 // The test is a bit convoluted since it needs to trigger a concurrency 737 // issue. First we sync up an initial chain of 2x512 items. Then announce 738 // 2x512+2 as head and delay delivering the head batch to fill the scratch 739 // space first. The delivery head should merge with the previous download 740 // and the scratch space must not be consumed further. 
741 { 742 head: chain[2*requestHeaders], 743 peers: []*skeletonTestPeer{ 744 newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header { 745 if origin == chain[2*requestHeaders+1].Number.Uint64() { 746 time.Sleep(100 * time.Millisecond) 747 } 748 return nil // Fallback to default behavior, just delayed 749 }), 750 newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header { 751 if origin == chain[2*requestHeaders+1].Number.Uint64() { 752 time.Sleep(100 * time.Millisecond) 753 } 754 return nil // Fallback to default behavior, just delayed 755 }), 756 }, 757 midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}}, 758 midserve: 2*requestHeaders - 1, // len - head - genesis 759 760 newHead: chain[2*requestHeaders+2], 761 endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}}, 762 endserve: 4 * requestHeaders, 763 }, 764 } 765 for i, tt := range tests { 766 // Create a fresh database and initialize it with the starting state 767 db := rawdb.NewMemoryDatabase() 768 rawdb.WriteHeader(db, chain[0]) 769 770 // Create a peer set to feed headers through 771 peerset := newPeerSet() 772 for _, peer := range tt.peers { 773 peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id))) 774 } 775 // Create a peer dropper to track malicious peers 776 dropped := make(map[string]int) 777 drop := func(peer string) { 778 if p := peerset.Peer(peer); p != nil { 779 atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1) 780 } 781 peerset.Unregister(peer) 782 dropped[peer]++ 783 } 784 // Create a skeleton sync and run a cycle 785 skeleton := newSkeleton(db, peerset, drop, newHookedBackfiller()) 786 skeleton.Sync(tt.head, true) 787 788 var progress skeletonProgress 789 // Wait a bit (bleah) for the initial sync loop to go to idle. This might 790 // be either a finish or a never-start hence why there's no event to hook. 
791 check := func() error { 792 if len(progress.Subchains) != len(tt.midstate) { 793 return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate)) 794 795 } 796 for j := 0; j < len(progress.Subchains); j++ { 797 if progress.Subchains[j].Head != tt.midstate[j].Head { 798 return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head) 799 } 800 if progress.Subchains[j].Tail != tt.midstate[j].Tail { 801 return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail) 802 } 803 } 804 return nil 805 } 806 807 waitStart := time.Now() 808 for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 { 809 time.Sleep(waitTime) 810 // Check the post-init end state if it matches the required results 811 json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) 812 if err := check(); err == nil { 813 break 814 } 815 } 816 if err := check(); err != nil { 817 t.Error(err) 818 continue 819 } 820 var served uint64 821 for _, peer := range tt.peers { 822 served += atomic.LoadUint64(&peer.served) 823 } 824 if served != tt.midserve { 825 t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve) 826 } 827 var drops uint64 828 for _, peer := range tt.peers { 829 drops += atomic.LoadUint64(&peer.dropped) 830 } 831 if drops != tt.middrop { 832 t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop) 833 } 834 // Apply the post-init events if there's any 835 if tt.newHead != nil { 836 skeleton.Sync(tt.newHead, true) 837 } 838 if tt.newPeer != nil { 839 if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { 840 t.Errorf("test %d: failed to register new peer: %v", i, err) 841 } 842 } 843 
// Wait a bit (bleah) for the second sync loop to go to idle. This might 844 // be either a finish or a never-start hence why there's no event to hook. 845 check = func() error { 846 if len(progress.Subchains) != len(tt.endstate) { 847 return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate)) 848 } 849 for j := 0; j < len(progress.Subchains); j++ { 850 if progress.Subchains[j].Head != tt.endstate[j].Head { 851 return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head) 852 } 853 if progress.Subchains[j].Tail != tt.endstate[j].Tail { 854 return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail) 855 } 856 } 857 return nil 858 } 859 waitStart = time.Now() 860 for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 { 861 time.Sleep(waitTime) 862 // Check the post-init end state if it matches the required results 863 json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) 864 if err := check(); err == nil { 865 break 866 } 867 } 868 if err := check(); err != nil { 869 t.Error(err) 870 continue 871 } 872 // Check that the peers served no more headers than we actually needed 873 served = 0 874 for _, peer := range tt.peers { 875 served += atomic.LoadUint64(&peer.served) 876 } 877 if tt.newPeer != nil { 878 served += atomic.LoadUint64(&tt.newPeer.served) 879 } 880 if served != tt.endserve { 881 t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve) 882 } 883 drops = 0 884 for _, peer := range tt.peers { 885 drops += atomic.LoadUint64(&peer.dropped) 886 } 887 if tt.newPeer != nil { 888 drops += atomic.LoadUint64(&tt.newPeer.dropped) 889 } 890 if drops != tt.middrop { 891 t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, 
tt.middrop) 892 } 893 // Clean up any leftover skeleton sync resources 894 skeleton.Terminate() 895 } 896 }