github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/eth/downloader/skeleton_test.go

// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/big"
	"os"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/log"
)

// hookedBackfiller is a tester backfiller with all interface methods mocked and
// hooked so tests can implement only the things they need.
type hookedBackfiller struct {
	// suspendHook is an optional hook to be called when the filler is requested
	// to be suspended.
	suspendHook func()

	// resumeHook is an optional hook to be called when the filler is requested
	// to be resumed.
	resumeHook func()
}

// newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
// essentially acting as a noop.
func newHookedBackfiller() backfiller {
	return new(hookedBackfiller)
}

// suspend requests the backfiller to abort any running full or snap sync
// based on the skeleton chain as it might be invalid. The backfiller should
// gracefully handle multiple consecutive suspends without a resume, even
// on initial startup.
func (hf *hookedBackfiller) suspend() *types.Header {
	if hf.suspendHook != nil {
		hf.suspendHook()
	}
	return nil // we don't really care about header cleanups for now
}

// resume requests the backfiller to start running fill or snap sync based on
// the skeleton chain as it has successfully been linked. Appending new heads
// to the end of the chain will not result in suspend/resume cycles.
func (hf *hookedBackfiller) resume() {
	if hf.resumeHook != nil {
		hf.resumeHook()
	}
}
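
// newCountingBackfiller is a hedged example sketch, not used by the tests in
// this file: it shows how the optional hooks can be wired up to observe the
// suspend/resume cycles driven by the skeleton syncer. The counter arguments
// are hypothetical, introduced purely for illustration.
func newCountingBackfiller(suspends, resumes *uint64) backfiller {
	return &hookedBackfiller{
		suspendHook: func() { atomic.AddUint64(suspends, 1) },
		resumeHook:  func() { atomic.AddUint64(resumes, 1) },
	}
}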
// skeletonTestPeer is a mock peer that can only serve header requests from a
// prepared header chain (which may be arbitrarily wrong for testing).
//
// Requesting anything else from these peers will hard panic. Note, do *not*
// implement any other methods. We actually want to make sure that the skeleton
// syncer only depends on - and will only ever issue - header requests.
type skeletonTestPeer struct {
	id      string          // Unique identifier of the mock peer
	headers []*types.Header // Headers to serve when requested

	serve func(origin uint64) []*types.Header // Hook to allow custom responses

	served  uint64 // Number of headers served by this peer
	dropped uint64 // Flag whether the peer was dropped (stop responding)
}

// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
	return &skeletonTestPeer{
		id:      id,
		headers: headers,
	}
}

// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync
// with, and sets an optional serve hook that can return headers for delivery
// instead of the predefined chain. Useful for emulating malicious behavior that
// would otherwise require dedicated peer types.
func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
	return &skeletonTestPeer{
		id:      id,
		headers: headers,
		serve:   serve,
	}
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Since skeleton test peers are in-memory mocks, dropping them does not make
	// them inaccessible. As such, check a local `dropped` field to see if the
	// peer has been dropped and should not respond any more.
	if atomic.LoadUint64(&p.dropped) != 0 {
		return nil, errors.New("peer already dropped")
	}
	// Skeleton sync retrieves batches of headers going backward without gaps.
	// This ensures we can follow a clean parent progression without any reorg
	// hiccups. There is no need for any other type of header retrieval, so do
	// panic if there's such a request.
	if !reverse || skip != 0 {
		// Note, if other clients want to do these kinds of requests, it's their
		// problem, it will still work. We just don't want *us* making complicated
		// requests without a very strong reason to.
		panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
	}
	// If the skeleton syncer requests the genesis block, panic. Whilst it could
	// be considered a valid request, our code specifically should not request it
	// ever since we want to link up headers to an existing local chain, which at
	// worst will be the genesis.
	if int64(origin)-int64(amount) < 0 {
		panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
	}
	// To make concurrency easier, the skeleton syncer always requests fixed size
	// batches of headers. Panic if the peer is requested an amount other than the
	// configured batch size (apart from the request leading to the genesis).
	if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
		panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
	}
	// Simple reverse header retrieval. Fill from the peer's chain and return.
	// If the tester has a serve hook set, try to use that before falling back
	// to the default behavior.
	var headers []*types.Header
	if p.serve != nil {
		headers = p.serve(origin)
	}
	if headers == nil {
		headers = make([]*types.Header, 0, amount)
		if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
			for i := 0; i < amount; i++ {
				// Consider nil headers as a form of attack and withhold them. Nil
				// cannot be decoded from RLP, so it's not possible to produce an
				// attack by sending/receiving those over eth.
				header := p.headers[int(origin)-i]
				if header == nil {
					continue
				}
				headers = append(headers, header)
			}
		}
	}
	atomic.AddUint64(&p.served, uint64(len(headers)))

	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: p.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error),
	}
	go func() {
		sink <- res
		if err := <-res.Done; err != nil {
			log.Warn("Skeleton test peer response rejected", "err", err)
			atomic.AddUint64(&p.dropped, 1)
		}
	}()
	return req, nil
}

func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
	panic("skeleton sync must not request the remote head")
}

func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request headers by hash")
}

func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request block bodies")
}

func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request receipts")
}

// Tests various sync initializations based on previous leftovers in the database
// and announced heads.
func TestSkeletonSyncInit(t *testing.T) {
	// Create a few key headers
	var (
		genesis  = &types.Header{Number: big.NewInt(0)}
		block49  = &types.Header{Number: big.NewInt(49)}
		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
	)
	tests := []struct {
		headers  []*types.Header // Database content (beside the genesis)
		oldstate []*subchain     // Old sync state with various interrupted subchains
		head     *types.Header   // New head header to announce to reorg to
		newstate []*subchain     // Expected sync state after the reorg
	}{
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head.
		{
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// Empty database with only the genesis set with a leftover empty sync
		// progress. This is a synthetic case, just for the sake of covering things.
		{
			oldstate: []*subchain{},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// A single leftover subchain is present, older than the new head. The
		// old subchain should be left as is and a new one appended to the sync
		// status.
		{
			oldstate: []*subchain{{Head: 10, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 10, Tail: 5},
			},
		},
		// Multiple leftover subchains are present, older than the new head. The
		// old subchains should be left as is and a new one appended to the sync
		// status.
		{
			oldstate: []*subchain{
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
		},
		// A single leftover subchain is present, newer than the new head. The
		// newer subchain should be deleted and a fresh one created for the head.
		{
			oldstate: []*subchain{{Head: 65, Tail: 60}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// Multiple leftover subchains are present, newer than the new head. The
		// newer subchains should be deleted and a fresh one created for the head.
		{
			oldstate: []*subchain{
				{Head: 75, Tail: 70},
				{Head: 65, Tail: 60},
			},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},

		// Two leftover subchains are present, one fully older and one fully
		// newer than the announced head. The head should delete the newer one,
		// keeping the older one.
		{
			oldstate: []*subchain{
				{Head: 65, Tail: 60},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 10, Tail: 5},
			},
		},
		// Multiple leftover subchains are present, some fully older and some
		// fully newer than the announced head. The head should delete the newer
		// ones, keeping the older ones.
		{
			oldstate: []*subchain{
				{Head: 75, Tail: 70},
				{Head: 65, Tail: 60},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
		},
		// A single leftover subchain is present and the new head is extending
		// it with one more header. We expect the subchain head to be pushed
		// forward.
		{
			headers:  []*types.Header{block49},
			oldstate: []*subchain{{Head: 49, Tail: 5}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 5}},
		},
		// A single leftover subchain is present and although the new head does
		// extend it number wise, the hash chain does not link up. We expect a
		// new subchain to be created for the dangling head.
		{
			headers:  []*types.Header{block49B},
			oldstate: []*subchain{{Head: 49, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 49, Tail: 5},
			},
		},
		// A single leftover subchain is present. A new head is announced that
		// links into the middle of it, correctly anchoring into an existing
		// header. We expect the old subchain to be truncated and extended with
		// the new head.
		{
			headers:  []*types.Header{block49},
			oldstate: []*subchain{{Head: 100, Tail: 5}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 5}},
		},
		// A single leftover subchain is present. A new head is announced that
		// links into the middle of it, but does not anchor into an existing
		// header. We expect the old subchain to be truncated and a new chain
		// to be created for the dangling head.
		{
			headers:  []*types.Header{block49B},
			oldstate: []*subchain{{Head: 100, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 49, Tail: 5},
			},
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()

		rawdb.WriteHeader(db, genesis)
		for _, header := range tt.headers {
			rawdb.WriteSkeletonHeader(db, header)
		}
		if tt.oldstate != nil {
			blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
			rawdb.WriteSkeletonSyncStatus(db, blob)
		}
		// Create a skeleton sync and run a cycle
		wait := make(chan struct{})

		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
		skeleton.syncStarting = func() { close(wait) }
		skeleton.Sync(tt.head, true)

		<-wait
		skeleton.Terminate()

		// Ensure the correct resulting sync status
		var progress skeletonProgress
		json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)

		if len(progress.Subchains) != len(tt.newstate) {
			t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
			continue
		}
		for j := 0; j < len(progress.Subchains); j++ {
			if progress.Subchains[j].Head != tt.newstate[j].Head {
				t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
			}
			if progress.Subchains[j].Tail != tt.newstate[j].Tail {
				t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
			}
		}
	}
}

// Tests that a running skeleton sync can be extended with properly linked up
// headers but not with side chains.
func TestSkeletonSyncExtend(t *testing.T) {
	// Create a few key headers
	var (
		genesis  = &types.Header{Number: big.NewInt(0)}
		block49  = &types.Header{Number: big.NewInt(49)}
		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
		block51  = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
	)
	tests := []struct {
		head     *types.Header // New head header to announce to reorg to
		extend   *types.Header // New head header to announce to extend with
		newstate []*subchain   // Expected sync state after the reorg
		err      error         // Whether extension succeeds or not
	}{
		// Initialize a sync and try to extend it with a subsequent block.
		{
			head:   block49,
			extend: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 49},
			},
		},
		// Initialize a sync and try to extend it with the existing head block.
		{
			head:   block49,
			extend: block49,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
		},
		// Initialize a sync and try to extend it with a sibling block.
		{
			head:   block49,
			extend: block49B,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a number-wise sequential
		// header, but a hash-wise non-linking one.
		{
			head:   block49B,
			extend: block50,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a non-linking future block.
		{
			head:   block49,
			extend: block51,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a past canonical block.
		{
			head:   block50,
			extend: block49,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a past sidechain block.
		{
			head:   block50,
			extend: block49B,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
			},
			err: errReorgDenied,
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()
		rawdb.WriteHeader(db, genesis)

		// Create a skeleton sync and run a cycle
		wait := make(chan struct{})

		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
		skeleton.syncStarting = func() { close(wait) }
		skeleton.Sync(tt.head, true)

		<-wait
		if err := skeleton.Sync(tt.extend, false); err != tt.err {
			t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
		}
		skeleton.Terminate()

		// Ensure the correct resulting sync status
		var progress skeletonProgress
		json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)

		if len(progress.Subchains) != len(tt.newstate) {
			t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
			continue
		}
		for j := 0; j < len(progress.Subchains); j++ {
			if progress.Subchains[j].Head != tt.newstate[j].Head {
				t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
			}
			if progress.Subchains[j].Tail != tt.newstate[j].Tail {
				t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
			}
		}
	}
}

// Tests that the skeleton sync correctly retrieves headers from one or more
// peers without duplicates or other strange side effects.
func TestSkeletonSyncRetrievals(t *testing.T) {
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	// Since skeleton headers don't need to be meaningful, beyond a parent hash
	// progression, create a long fake chain to test with.
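	// Note that chain[i] is the header at height i, so slice indices double as
	// block numbers in the test cases below.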
	chain := []*types.Header{{Number: big.NewInt(0)}}
	for i := 1; i < 10000; i++ {
		chain = append(chain, &types.Header{
			ParentHash: chain[i-1].Hash(),
			Number:     big.NewInt(int64(i)),
		})
	}
	tests := []struct {
		headers  []*types.Header // Database content (beside the genesis)
		oldstate []*subchain     // Old sync state with various interrupted subchains

		head     *types.Header       // New head header to announce to reorg to
		peers    []*skeletonTestPeer // Initial peer set to start the sync with
		midstate []*subchain         // Expected sync state after initial cycle
		midserve uint64              // Expected number of header retrievals after initial cycle
		middrop  uint64              // Expected number of peers dropped after initial cycle

		newHead  *types.Header     // New header to announce on top of the old one
		newPeer  *skeletonTestPeer // New peer to join the skeleton syncer
		endstate []*subchain       // Expected sync state after the post-init event
		endserve uint64            // Expected number of header retrievals after the post-init event
		enddrop  uint64            // Expected number of peers dropped after the post-init event
	}{
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head. No peers however, so
		// the sync should be stuck without any progression.
		//
		// When a new peer is added, it should detect the join and fill the headers
		// to the genesis block.
		{
			head:     chain[len(chain)-1],
			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},

			newPeer:  newSkeletonTestPeer("test-peer", chain),
			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			endserve: uint64(len(chain) - 2), // len - head - genesis
		},
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head. With one valid peer,
		// the sync is expected to complete already in the initial round.
		//
		// Adding a second peer should not have any effect.
		{
			head:     chain[len(chain)-1],
			peers:    []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			midserve: uint64(len(chain) - 2), // len - head - genesis

			newPeer:  newSkeletonTestPeer("test-peer-2", chain),
			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			endserve: uint64(len(chain) - 2), // len - head - genesis
		},
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head. With many valid peers,
		// the sync is expected to complete already in the initial round.
		//
		// Adding a new peer should not have any effect.
		{
			head: chain[len(chain)-1],
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("test-peer-1", chain),
				newSkeletonTestPeer("test-peer-2", chain),
				newSkeletonTestPeer("test-peer-3", chain),
			},
			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			midserve: uint64(len(chain) - 2), // len - head - genesis

			newPeer:  newSkeletonTestPeer("test-peer-4", chain),
			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
			endserve: uint64(len(chain) - 2), // len - head - genesis
		},
		// This test checks if a peer tries to withhold a header - *on* the sync
		// boundary - instead of sending the requested amount. The malicious short
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100],
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
			middrop:  1,                        // penalize shortened header deliveries

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to withhold a header - *off* the sync
		// boundary - instead of sending the requested amount. The malicious short
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100],
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
			middrop:  1,                        // penalize shortened header deliveries

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to duplicate a header - *on* the sync
		// boundary - instead of sending the correct sequence. The malicious duped
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 2, // len - head - genesis
			middrop:  1,                        // penalize invalid header sequences

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to duplicate a header - *off* the sync
		// boundary - instead of sending the correct sequence. The malicious duped
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 2, // len - head - genesis
			middrop:  1,                        // penalize invalid header sequences

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to inject a different header - *on*
		// the sync boundary - instead of sending the correct sequence. The bad
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-changer",
					append(
						append(
							append([]*types.Header{}, chain[:99]...),
							&types.Header{
								ParentHash: chain[98].Hash(),
								Number:     big.NewInt(int64(99)),
								GasLimit:   1,
							},
						), chain[100:]...,
					),
				),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 2, // len - head - genesis
			middrop:  1,                        // different set of headers, drop // TODO(karalabe): maybe just diff sync?

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test checks if a peer tries to inject a different header - *off*
		// the sync boundary - instead of sending the correct sequence. The bad
		// package should not be accepted.
		//
		// Joining with a new peer should however unblock the sync.
		{
			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
			peers: []*skeletonTestPeer{
				newSkeletonTestPeer("header-changer",
					append(
						append(
							append([]*types.Header{}, chain[:50]...),
							&types.Header{
								ParentHash: chain[49].Hash(),
								Number:     big.NewInt(int64(50)),
								GasLimit:   1,
							},
						), chain[51:]...,
					),
				),
			},
			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
			midserve: requestHeaders + 101 - 2, // len - head - genesis
			middrop:  1,                        // different set of headers, drop

			newPeer:  newSkeletonTestPeer("good-peer", chain),
			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
			enddrop:  1,                                      // no new drops
		},
		// This test reproduces a bug caught during review (kudos to @holiman)
		// where a subchain is merged with a previously interrupted one, causing
		// pending data in the scratch space to become "invalid" (since we jump
		// ahead during subchain merge). In that case it is expected to ignore
		// the queued up data instead of trying to process on top of a shifted
		// task set.
		//
		// The test is a bit convoluted since it needs to trigger a concurrency
		// issue. First we sync up an initial chain of 2x512 items. Then announce
		// 2x512+2 as head and delay delivering the head batch to fill the scratch
		// space first. The delivery head should merge with the previous download
		// and the scratch space must not be consumed further.
		{
			head: chain[2*requestHeaders],
			peers: []*skeletonTestPeer{
				newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
					if origin == chain[2*requestHeaders+1].Number.Uint64() {
						time.Sleep(100 * time.Millisecond)
					}
					return nil // Fallback to default behavior, just delayed
				}),
				newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
					if origin == chain[2*requestHeaders+1].Number.Uint64() {
						time.Sleep(100 * time.Millisecond)
					}
					return nil // Fallback to default behavior, just delayed
				}),
			},
			midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
			midserve: 2*requestHeaders - 1, // len - head - genesis

			newHead:  chain[2*requestHeaders+2],
			endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
			endserve: 4 * requestHeaders,
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()
		rawdb.WriteHeader(db, chain[0])

		// Create a peer set to feed headers through
		peerset := newPeerSet()
		for _, peer := range tt.peers {
			peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id)))
		}
		// Create a peer dropper to track malicious peers
		dropped := make(map[string]int)
		drop := func(peer string) {
			if p := peerset.Peer(peer); p != nil {
				atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1)
			}
			peerset.Unregister(peer)
			dropped[peer]++
		}
		// Create a skeleton sync and run a cycle
		skeleton := newSkeleton(db, peerset, drop, newHookedBackfiller())
		skeleton.Sync(tt.head, true)

		var progress skeletonProgress
		// Wait a bit (bleah) for the initial sync loop to go to idle. This might
		// be either a finish or a never-start hence why there's no event to hook.
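		// The mid-state check below is polled with exponentially growing sleeps
		// (starting at 20ms) for up to a second, until the persisted sync status
		// matches the expected subchains.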
		check := func() error {
			if len(progress.Subchains) != len(tt.midstate) {
				return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate))
			}
			for j := 0; j < len(progress.Subchains); j++ {
				if progress.Subchains[j].Head != tt.midstate[j].Head {
					return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head)
				}
				if progress.Subchains[j].Tail != tt.midstate[j].Tail {
					return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail)
				}
			}
			return nil
		}

		waitStart := time.Now()
		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
			time.Sleep(waitTime)
			// Check the mid state if it matches the required results
			json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
			if err := check(); err == nil {
				break
			}
		}
		if err := check(); err != nil {
			t.Error(err)
			continue
		}
		var served uint64
		for _, peer := range tt.peers {
			served += atomic.LoadUint64(&peer.served)
		}
		if served != tt.midserve {
			t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
		}
		var drops uint64
		for _, peer := range tt.peers {
			drops += atomic.LoadUint64(&peer.dropped)
		}
		if drops != tt.middrop {
			t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
		}
		// Apply the post-init events if there are any
		if tt.newHead != nil {
			skeleton.Sync(tt.newHead, true)
		}
		if tt.newPeer != nil {
			if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
				t.Errorf("test %d: failed to register new peer: %v", i, err)
			}
		}
		// Wait a bit (bleah) for the second sync loop to go to idle. This might
		// be either a finish or a never-start hence why there's no event to hook.
		check = func() error {
			if len(progress.Subchains) != len(tt.endstate) {
				return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate))
			}
			for j := 0; j < len(progress.Subchains); j++ {
				if progress.Subchains[j].Head != tt.endstate[j].Head {
					return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head)
				}
				if progress.Subchains[j].Tail != tt.endstate[j].Tail {
					return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail)
				}
			}
			return nil
		}
		waitStart = time.Now()
		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
			time.Sleep(waitTime)
			// Check the post-init end state if it matches the required results
			json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
			if err := check(); err == nil {
				break
			}
		}
		if err := check(); err != nil {
			t.Error(err)
			continue
		}
		// Check that the peers served no more headers than we actually needed
		served = 0
		for _, peer := range tt.peers {
			served += atomic.LoadUint64(&peer.served)
		}
		if tt.newPeer != nil {
			served += atomic.LoadUint64(&tt.newPeer.served)
		}
		if served != tt.endserve {
			t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
		}
		drops = 0
		for _, peer := range tt.peers {
			drops += atomic.LoadUint64(&peer.dropped)
		}
		if tt.newPeer != nil {
			drops += atomic.LoadUint64(&tt.newPeer.dropped)
		}
		if drops != tt.enddrop {
			t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.enddrop)
		}
		// Clean up any leftover skeleton sync resources
		skeleton.Terminate()
	}
}
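
// newWithholdingTestPeer is a hedged illustration (not exercised by the tests
// above) of how the serve hook from newSkeletonTestPeerWithHook can emulate
// further misbehavior: this sketch assumes a hypothetical peer that returns
// empty batches above a cutoff height and falls back to the canned chain below.
func newWithholdingTestPeer(id string, headers []*types.Header, cutoff uint64) *skeletonTestPeer {
	return newSkeletonTestPeerWithHook(id, headers, func(origin uint64) []*types.Header {
		if origin > cutoff {
			return []*types.Header{} // non-nil empty batch: serve nothing above the cutoff
		}
		return nil // nil means fall back to the default serving behavior
	})
}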