github.com/Cleverse/go-ethereum@v0.0.0-20220927095127-45113064e7f2/eth/handler_eth_test.go (about) 1 // Copyright 2020 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package eth 18 19 import ( 20 "fmt" 21 "math/big" 22 "math/rand" 23 "sync/atomic" 24 "testing" 25 "time" 26 27 "github.com/ethereum/go-ethereum/common" 28 "github.com/ethereum/go-ethereum/consensus" 29 "github.com/ethereum/go-ethereum/consensus/ethash" 30 "github.com/ethereum/go-ethereum/core" 31 "github.com/ethereum/go-ethereum/core/forkid" 32 "github.com/ethereum/go-ethereum/core/rawdb" 33 "github.com/ethereum/go-ethereum/core/types" 34 "github.com/ethereum/go-ethereum/core/vm" 35 "github.com/ethereum/go-ethereum/eth/downloader" 36 "github.com/ethereum/go-ethereum/eth/protocols/eth" 37 "github.com/ethereum/go-ethereum/event" 38 "github.com/ethereum/go-ethereum/p2p" 39 "github.com/ethereum/go-ethereum/p2p/enode" 40 "github.com/ethereum/go-ethereum/params" 41 "github.com/ethereum/go-ethereum/rlp" 42 ) 43 44 // testEthHandler is a mock event handler to listen for inbound network requests 45 // on the `eth` protocol and convert them into a more easily testable form. 
46 type testEthHandler struct { 47 blockBroadcasts event.Feed 48 txAnnounces event.Feed 49 txBroadcasts event.Feed 50 } 51 52 func (h *testEthHandler) Chain() *core.BlockChain { panic("no backing chain") } 53 func (h *testEthHandler) TxPool() eth.TxPool { panic("no backing tx pool") } 54 func (h *testEthHandler) AcceptTxs() bool { return true } 55 func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") } 56 func (h *testEthHandler) PeerInfo(enode.ID) interface{} { panic("not used in tests") } 57 58 func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { 59 switch packet := packet.(type) { 60 case *eth.NewBlockPacket: 61 h.blockBroadcasts.Send(packet.Block) 62 return nil 63 64 case *eth.NewPooledTransactionHashesPacket: 65 h.txAnnounces.Send(([]common.Hash)(*packet)) 66 return nil 67 68 case *eth.TransactionsPacket: 69 h.txBroadcasts.Send(([]*types.Transaction)(*packet)) 70 return nil 71 72 case *eth.PooledTransactionsPacket: 73 h.txBroadcasts.Send(([]*types.Transaction)(*packet)) 74 return nil 75 76 default: 77 panic(fmt.Sprintf("unexpected eth packet type in tests: %T", packet)) 78 } 79 } 80 81 // Tests that peers are correctly accepted (or rejected) based on the advertised 82 // fork IDs in the protocol handshake. 
func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }

// testForkIDSplit spins up two handlers on chains with identical genesis but
// diverging fork configs and verifies that handshakes succeed while the fork
// IDs are still compatible, and that one side is rejected once they diverge.
func testForkIDSplit(t *testing.T, protocol uint) {
	t.Parallel()

	var (
		engine = ethash.NewFaker()

		configNoFork  = &params.ChainConfig{HomesteadBlock: big.NewInt(1)}
		configProFork = &params.ChainConfig{
			HomesteadBlock: big.NewInt(1),
			EIP150Block:    big.NewInt(2),
			EIP155Block:    big.NewInt(2),
			EIP158Block:    big.NewInt(2),
			ByzantiumBlock: big.NewInt(3),
		}
		dbNoFork  = rawdb.NewMemoryDatabase()
		dbProFork = rawdb.NewMemoryDatabase()

		gspecNoFork  = &core.Genesis{Config: configNoFork}
		gspecProFork = &core.Genesis{Config: configProFork}

		genesisNoFork  = gspecNoFork.MustCommit(dbNoFork)
		genesisProFork = gspecProFork.MustCommit(dbProFork)

		chainNoFork, _  = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil)
		chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil, nil)

		blocksNoFork, _  = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
		blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)

		ethNoFork, _ = newHandler(&handlerConfig{
			Database:   dbNoFork,
			Chain:      chainNoFork,
			TxPool:     newTestTxPool(),
			Merger:     consensus.NewMerger(rawdb.NewMemoryDatabase()),
			Network:    1,
			Sync:       downloader.FullSync,
			BloomCache: 1,
		})
		ethProFork, _ = newHandler(&handlerConfig{
			Database:   dbProFork,
			Chain:      chainProFork,
			TxPool:     newTestTxPool(),
			Merger:     consensus.NewMerger(rawdb.NewMemoryDatabase()),
			Network:    1,
			Sync:       downloader.FullSync,
			BloomCache: 1,
		})
	)
	ethNoFork.Start(1000)
	ethProFork.Start(1000)

	// Clean up everything after ourselves
	defer chainNoFork.Stop()
	defer chainProFork.Stop()

	defer ethNoFork.Stop()
	defer ethProFork.Stop()

	// Phase 1: both nodes should allow the other to connect (same genesis, next fork is the same)
	p2pNoFork, p2pProFork := p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
	peerProFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	// Each handler runs the other side's peer so the handshakes cross over the pipe.
	errc := make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatalf("frontier nofork <-> profork failed: %v", err)
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("frontier nofork <-> profork handler timeout")
		}
	}
	// Phase 2: progress into Homestead. Fork's match, so we don't care what the future holds
	chainNoFork.InsertChain(blocksNoFork[:1])
	chainProFork.InsertChain(blocksProFork[:1])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc = make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatalf("homestead nofork <-> profork failed: %v", err)
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("homestead nofork <-> profork handler timeout")
		}
	}
	// Phase 3: progress into Spurious. Forks mismatch, signalling differing chains, reject
	chainNoFork.InsertChain(blocksNoFork[1:2])
	chainProFork.InsertChain(blocksProFork[1:2])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
	peerProFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc = make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	// At least one of the two handshakes must fail with a fork ID rejection;
	// only one side is expected to disconnect, so two successes is a failure.
	var successes int
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err == nil {
				successes++
				if successes == 2 { // Only one side disconnects
					t.Fatalf("fork ID rejection didn't happen")
				}
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("split peers not rejected")
		}
	}
}

// Tests that received transactions are added to the local pool.
func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }

// testRecvTransactions feeds a signed transaction in over a peer connection and
// verifies that the handler injects it into the local transaction pool.
func testRecvTransactions(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a message handler, configure it to accept transactions and watch them
	handler := newTestHandler()
	defer handler.close()

	handler.handler.acceptTxs = 1 // mark synced to accept transactions

	txs := make(chan core.NewTxsEvent)
	sub := handler.txpool.SubscribeNewTxsEvent(txs)
	defer sub.Unsubscribe()

	// Create a source peer to send messages through and a sink handler to receive them
	p2pSrc, p2pSink := p2p.MsgPipe()
	defer p2pSrc.Close()
	defer p2pSink.Close()

	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool)
	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool)
	defer src.Close()
	defer sink.Close()

	go handler.handler.runEthPeer(sink, func(peer *eth.Peer) error {
		return eth.Handle((*ethHandler)(handler.handler), peer)
	})
	// Run the handshake locally to avoid spinning up a source handler
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake")
	}
	// Send the transaction to the sink and verify that it's added to the tx pool
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
	tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

	if err := src.SendTransactions([]*types.Transaction{tx}); err != nil {
		t.Fatalf("failed to send transaction: %v", err)
	}
	select {
	case event := <-txs:
		if len(event.Txs) != 1 {
			t.Errorf("wrong number of added transactions: got %d, want 1", len(event.Txs))
		} else if event.Txs[0].Hash() != tx.Hash() {
			t.Errorf("added wrong tx hash: got %v, want %v", event.Txs[0].Hash(), tx.Hash())
		}
	case <-time.After(2 * time.Second):
		t.Errorf("no NewTxsEvent received within 2 seconds")
	}
}

// This test checks that pending transactions are sent.
func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }

// testSendTransactions fills the source pool with large transactions and checks
// that a freshly connected peer receives them all as hash announcements (not
// full broadcasts) on eth/66 and later.
func testSendTransactions(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a message handler and fill the pool with big transactions
	handler := newTestHandler()
	defer handler.close()

	insert := make([]*types.Transaction, 100)
	for nonce := range insert {
		// 10KB payload makes the txs large enough to force announce-and-fetch
		// instead of direct broadcast.
		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, 10240))
		tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

		insert[nonce] = tx
	}
	go handler.txpool.AddRemotes(insert) // Need goroutine to not block on feed
	time.Sleep(250 * time.Millisecond)   // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join)

	// Create a source handler to send messages through and a sink peer to receive them
	p2pSrc, p2pSink := p2p.MsgPipe()
	defer p2pSrc.Close()
	defer p2pSink.Close()

	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool)
	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool)
	defer src.Close()
	defer sink.Close()

	go handler.handler.runEthPeer(src, func(peer *eth.Peer) error {
		return eth.Handle((*ethHandler)(handler.handler), peer)
	})
	// Run the handshake locally to avoid spinning up a source handler
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake")
	}
	// After the handshake completes, the source handler should stream the sink
	// the transactions, subscribe to all inbound network events
	backend := new(testEthHandler)

	anns := make(chan []common.Hash)
	annSub := backend.txAnnounces.Subscribe(anns)
	defer annSub.Unsubscribe()

	bcasts := make(chan []*types.Transaction)
	bcastSub := backend.txBroadcasts.Subscribe(bcasts)
	defer bcastSub.Unsubscribe()

	go eth.Handle(backend, sink)

	// Make sure we get all the transactions on the correct channels
	seen := make(map[common.Hash]struct{})
	for len(seen) < len(insert) {
		switch protocol {
		case 66:
			select {
			case hashes := <-anns:
				for _, hash := range hashes {
					if _, ok := seen[hash]; ok {
						t.Errorf("duplicate transaction announced: %x", hash)
					}
					seen[hash] = struct{}{}
				}
			case <-bcasts:
				t.Errorf("initial tx broadcast received on post eth/66")
			}

		default:
			panic("unsupported protocol, please extend test")
		}
	}
	for _, tx := range insert {
		if _, ok := seen[tx.Hash()]; !ok {
			t.Errorf("missing transaction: %x", tx.Hash())
		}
	}
}

// Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals.
func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }

func testTransactionPropagation(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a source handler to send transactions from and a number of sinks
	// to receive them. We need multiple sinks since a one-to-one peering would
	// broadcast all transactions without announcement.
	source := newTestHandler()
	source.handler.snapSync = 0 // Avoid requiring snap, otherwise some will be dropped below
	defer source.close()

	sinks := make([]*testHandler, 10)
	for i := 0; i < len(sinks); i++ {
		sinks[i] = newTestHandler()
		defer sinks[i].close()

		sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions
	}
	// Interconnect all the sink handlers with the source handler
	for i, sink := range sinks {
		sink := sink // Closure for goroutine below

		sourcePipe, sinkPipe := p2p.MsgPipe()
		defer sourcePipe.Close()
		defer sinkPipe.Close()

		sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool)
		sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool)
		defer sourcePeer.Close()
		defer sinkPeer.Close()

		go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(source.handler), peer)
		})
		go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(sink.handler), peer)
		})
	}
	// Subscribe to all the transaction pools
	txChs := make([]chan core.NewTxsEvent, len(sinks))
	for i := 0; i < len(sinks); i++ {
		txChs[i] = make(chan core.NewTxsEvent, 1024)

		sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i])
		defer sub.Unsubscribe()
	}
	// Fill the source pool with transactions and wait for them at the sinks
	txs := make([]*types.Transaction, 1024)
	for nonce := range txs {
		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
		tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

		txs[nonce] = tx
	}
	source.txpool.AddRemotes(txs)

	// Iterate through all the sinks and ensure they all got the transactions
	for i := range sinks {
		for arrived, timeout := 0, false; arrived < len(txs) && !timeout; {
			select {
			case event := <-txChs[i]:
				arrived += len(event.Txs)
			case <-time.After(time.Second):
				t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs))
				timeout = true
			}
		}
	}
}

// Tests that post eth protocol handshake, clients perform a mutual checkpoint
// challenge to validate each other's chains. Hash mismatches, or missing ones
// during a fast sync should lead to the peer getting dropped.
func TestCheckpointChallenge(t *testing.T) {
	tests := []struct {
		syncmode   downloader.SyncMode
		checkpoint bool // whether a local checkpoint (CHT) is configured
		timeout    bool // whether the remote never answers the challenge
		empty      bool // whether the remote answers with zero headers
		match      bool // whether the remote's header matches the checkpoint
		drop       bool // expected outcome: should the peer be dropped
	}{
		// If checkpointing is not enabled locally, don't challenge and don't drop
		{downloader.FullSync, false, false, false, false, false},
		{downloader.SnapSync, false, false, false, false, false},

		// If checkpointing is enabled locally and remote response is empty, only drop during fast sync
		{downloader.FullSync, true, false, true, false, false},
		{downloader.SnapSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer

		// If checkpointing is enabled locally and remote response mismatches, always drop
		{downloader.FullSync, true, false, false, false, true},
		{downloader.SnapSync, true, false, false, false, true},

		// If checkpointing is enabled locally and remote response matches, never drop
		{downloader.FullSync, true, false, false, true, false},
		{downloader.SnapSync, true, false, false, true, false},

		// If checkpointing is enabled locally and remote times out, always drop
		{downloader.FullSync, true, true, false, true, true},
		{downloader.SnapSync, true, true, false, true, true},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match),
			func(t *testing.T) {
				testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop)
			})
	}
}

// testCheckpointChallenge runs a single checkpoint-challenge scenario: it wires
// a local handler to a hand-driven remote peer, optionally answers the header
// challenge (correctly, incorrectly, emptily or not at all) and verifies
// whether the peer ends up dropped or retained.
func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) {
	// Reduce the checkpoint handshake challenge timeout
	defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout)
	syncChallengeTimeout = 250 * time.Millisecond

	// Create a test handler and inject a CHT into it. The injection is a bit
	// ugly, but it beats creating everything manually just to avoid reaching
	// into the internals a bit.
	handler := newTestHandler()
	defer handler.close()

	if syncmode == downloader.SnapSync {
		atomic.StoreUint32(&handler.handler.snapSync, 1)
	} else {
		atomic.StoreUint32(&handler.handler.snapSync, 0)
	}
	var response *types.Header
	if checkpoint {
		// Pick a random CHT section boundary as the checkpoint block number.
		number := (uint64(rand.Intn(500))+1)*params.CHTFrequency - 1
		response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")}

		handler.handler.checkpointNumber = number
		handler.handler.checkpointHash = response.Hash()
	}

	// Create a challenger peer and a challenged one.
	p2pLocal, p2pRemote := p2p.MsgPipe()
	defer p2pLocal.Close()
	defer p2pRemote.Close()

	local := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pLocal), p2pLocal, handler.txpool)
	remote := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pRemote), p2pRemote, handler.txpool)
	defer local.Close()
	defer remote.Close()

	handlerDone := make(chan struct{})
	go func() {
		defer close(handlerDone)
		handler.handler.runEthPeer(local, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(handler.handler), peer)
		})
	}()

	// Run the handshake locally to avoid spinning up a remote handler.
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake")
	}
	// Connect a new peer and check that we receive the checkpoint challenge.
	if checkpoint {
		msg, err := p2pRemote.ReadMsg()
		if err != nil {
			t.Fatalf("failed to read checkpoint challenge: %v", err)
		}
		request := new(eth.GetBlockHeadersPacket66)
		if err := msg.Decode(request); err != nil {
			t.Fatalf("failed to decode checkpoint challenge: %v", err)
		}
		query := request.GetBlockHeadersPacket
		if query.Origin.Number != response.Number.Uint64() || query.Amount != 1 || query.Skip != 0 || query.Reverse {
			t.Fatalf("challenge mismatch: have [%d, %d, %d, %v] want [%d, %d, %d, %v]",
				query.Origin.Number, query.Amount, query.Skip, query.Reverse,
				response.Number.Uint64(), 1, 0, false)
		}
		// Create a block to reply to the challenge if no timeout is simulated.
		if !timeout {
			if empty {
				if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			} else if match {
				responseRlp, _ := rlp.EncodeToBytes(response)
				if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			} else {
				// Same block number, different contents: hash mismatch.
				responseRlp, _ := rlp.EncodeToBytes(&types.Header{Number: response.Number})
				if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			}
		}
	}
	// Wait until the test timeout passes to ensure proper cleanup
	time.Sleep(syncChallengeTimeout + 300*time.Millisecond)

	// Verify that the remote peer is maintained or dropped.
	if drop {
		<-handlerDone
		if peers := handler.handler.peers.len(); peers != 0 {
			t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
		}
	} else {
		if peers := handler.handler.peers.len(); peers != 1 {
			t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
		}
	}
}

// Tests that blocks are broadcast to a sqrt number of peers only.
596 func TestBroadcastBlock1Peer(t *testing.T) { testBroadcastBlock(t, 1, 1) } 597 func TestBroadcastBlock2Peers(t *testing.T) { testBroadcastBlock(t, 2, 1) } 598 func TestBroadcastBlock3Peers(t *testing.T) { testBroadcastBlock(t, 3, 1) } 599 func TestBroadcastBlock4Peers(t *testing.T) { testBroadcastBlock(t, 4, 2) } 600 func TestBroadcastBlock5Peers(t *testing.T) { testBroadcastBlock(t, 5, 2) } 601 func TestBroadcastBlock8Peers(t *testing.T) { testBroadcastBlock(t, 9, 3) } 602 func TestBroadcastBlock12Peers(t *testing.T) { testBroadcastBlock(t, 12, 3) } 603 func TestBroadcastBlock16Peers(t *testing.T) { testBroadcastBlock(t, 16, 4) } 604 func TestBroadcastBloc26Peers(t *testing.T) { testBroadcastBlock(t, 26, 5) } 605 func TestBroadcastBlock100Peers(t *testing.T) { testBroadcastBlock(t, 100, 10) } 606 607 func testBroadcastBlock(t *testing.T, peers, bcasts int) { 608 t.Parallel() 609 610 // Create a source handler to broadcast blocks from and a number of sinks 611 // to receive them. 612 source := newTestHandlerWithBlocks(1) 613 defer source.close() 614 615 sinks := make([]*testEthHandler, peers) 616 for i := 0; i < len(sinks); i++ { 617 sinks[i] = new(testEthHandler) 618 } 619 // Interconnect all the sink handlers with the source handler 620 var ( 621 genesis = source.chain.Genesis() 622 td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) 623 ) 624 for i, sink := range sinks { 625 sink := sink // Closure for gorotuine below 626 627 sourcePipe, sinkPipe := p2p.MsgPipe() 628 defer sourcePipe.Close() 629 defer sinkPipe.Close() 630 631 sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) 632 sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) 633 defer sourcePeer.Close() 634 defer sinkPeer.Close() 635 636 go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { 637 return eth.Handle((*ethHandler)(source.handler), peer) 638 }) 639 if err := 
sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil { 640 t.Fatalf("failed to run protocol handshake") 641 } 642 go eth.Handle(sink, sinkPeer) 643 } 644 // Subscribe to all the transaction pools 645 blockChs := make([]chan *types.Block, len(sinks)) 646 for i := 0; i < len(sinks); i++ { 647 blockChs[i] = make(chan *types.Block, 1) 648 defer close(blockChs[i]) 649 650 sub := sinks[i].blockBroadcasts.Subscribe(blockChs[i]) 651 defer sub.Unsubscribe() 652 } 653 // Initiate a block propagation across the peers 654 time.Sleep(100 * time.Millisecond) 655 source.handler.BroadcastBlock(source.chain.CurrentBlock(), true) 656 657 // Iterate through all the sinks and ensure the correct number got the block 658 done := make(chan struct{}, peers) 659 for _, ch := range blockChs { 660 ch := ch 661 go func() { 662 <-ch 663 done <- struct{}{} 664 }() 665 } 666 var received int 667 for { 668 select { 669 case <-done: 670 received++ 671 672 case <-time.After(100 * time.Millisecond): 673 if received != bcasts { 674 t.Errorf("broadcast count mismatch: have %d, want %d", received, bcasts) 675 } 676 return 677 } 678 } 679 } 680 681 // Tests that a propagated malformed block (uncles or transactions don't match 682 // with the hashes in the header) gets discarded and not broadcast forward. 683 func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) } 684 685 func testBroadcastMalformedBlock(t *testing.T, protocol uint) { 686 t.Parallel() 687 688 // Create a source handler to broadcast blocks from and a number of sinks 689 // to receive them. 
690 source := newTestHandlerWithBlocks(1) 691 defer source.close() 692 693 // Create a source handler to send messages through and a sink peer to receive them 694 p2pSrc, p2pSink := p2p.MsgPipe() 695 defer p2pSrc.Close() 696 defer p2pSink.Close() 697 698 src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, source.txpool) 699 sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, source.txpool) 700 defer src.Close() 701 defer sink.Close() 702 703 go source.handler.runEthPeer(src, func(peer *eth.Peer) error { 704 return eth.Handle((*ethHandler)(source.handler), peer) 705 }) 706 // Run the handshake locally to avoid spinning up a sink handler 707 var ( 708 genesis = source.chain.Genesis() 709 td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) 710 ) 711 if err := sink.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil { 712 t.Fatalf("failed to run protocol handshake") 713 } 714 // After the handshake completes, the source handler should stream the sink 715 // the blocks, subscribe to inbound network events 716 backend := new(testEthHandler) 717 718 blocks := make(chan *types.Block, 1) 719 sub := backend.blockBroadcasts.Subscribe(blocks) 720 defer sub.Unsubscribe() 721 722 go eth.Handle(backend, sink) 723 724 // Create various combinations of malformed blocks 725 head := source.chain.CurrentBlock() 726 727 malformedUncles := head.Header() 728 malformedUncles.UncleHash[0]++ 729 malformedTransactions := head.Header() 730 malformedTransactions.TxHash[0]++ 731 malformedEverything := head.Header() 732 malformedEverything.UncleHash[0]++ 733 malformedEverything.TxHash[0]++ 734 735 // Try to broadcast all malformations and ensure they all get discarded 736 for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} { 737 block := 
types.NewBlockWithHeader(header).WithBody(head.Transactions(), head.Uncles()) 738 if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil { 739 t.Fatalf("failed to broadcast block: %v", err) 740 } 741 select { 742 case <-blocks: 743 t.Fatalf("malformed block forwarded") 744 case <-time.After(100 * time.Millisecond): 745 } 746 } 747 }