github.com/tirogen/go-ethereum@v1.10.12-0.20221226051715-250cfede41b6/eth/handler_eth_test.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"fmt"
	"math/big"
	"math/rand"
	"sync/atomic"
	"testing"
	"time"

	"github.com/tirogen/go-ethereum/common"
	"github.com/tirogen/go-ethereum/consensus"
	"github.com/tirogen/go-ethereum/consensus/ethash"
	"github.com/tirogen/go-ethereum/core"
	"github.com/tirogen/go-ethereum/core/forkid"
	"github.com/tirogen/go-ethereum/core/rawdb"
	"github.com/tirogen/go-ethereum/core/types"
	"github.com/tirogen/go-ethereum/core/vm"
	"github.com/tirogen/go-ethereum/eth/downloader"
	"github.com/tirogen/go-ethereum/eth/protocols/eth"
	"github.com/tirogen/go-ethereum/event"
	"github.com/tirogen/go-ethereum/p2p"
	"github.com/tirogen/go-ethereum/p2p/enode"
	"github.com/tirogen/go-ethereum/params"
	"github.com/tirogen/go-ethereum/rlp"
)

// testEthHandler is a mock event handler to listen for inbound network requests
// on the `eth` protocol and convert them into a more easily testable form.
type testEthHandler struct {
	blockBroadcasts event.Feed
	txAnnounces     event.Feed
	txBroadcasts    event.Feed
}

func (h *testEthHandler) Chain() *core.BlockChain              { panic("no backing chain") }
func (h *testEthHandler) TxPool() eth.TxPool                   { panic("no backing tx pool") }
func (h *testEthHandler) AcceptTxs() bool                      { return true }
func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") }
func (h *testEthHandler) PeerInfo(enode.ID) interface{}        { panic("not used in tests") }

func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
	switch packet := packet.(type) {
	case *eth.NewBlockPacket:
		h.blockBroadcasts.Send(packet.Block)
		return nil

	case *eth.NewPooledTransactionHashesPacket66:
		h.txAnnounces.Send(([]common.Hash)(*packet))
		return nil

	case *eth.NewPooledTransactionHashesPacket68:
		h.txAnnounces.Send(packet.Hashes)
		return nil

	case *eth.TransactionsPacket:
		h.txBroadcasts.Send(([]*types.Transaction)(*packet))
		return nil

	case *eth.PooledTransactionsPacket:
		h.txBroadcasts.Send(([]*types.Transaction)(*packet))
		return nil

	default:
		panic(fmt.Sprintf("unexpected eth packet type in tests: %T", packet))
	}
}

// Tests that peers are correctly accepted (or rejected) based on the advertised
// fork IDs in the protocol handshake.
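// The test below builds two chains from a common genesis: one that never
// schedules the extra forks and one that enables EIP150/155/158 and Byzantium.
// Handshakes are attempted at three points: before either side has forked
// (accept), after compatible progress into Homestead (accept), and once the
// schedules diverge at Spurious Dragon (one side must reject).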
func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }
func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) }
func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) }

func testForkIDSplit(t *testing.T, protocol uint) {
	t.Parallel()

	var (
		engine = ethash.NewFaker()

		configNoFork  = &params.ChainConfig{HomesteadBlock: big.NewInt(1)}
		configProFork = &params.ChainConfig{
			HomesteadBlock: big.NewInt(1),
			EIP150Block:    big.NewInt(2),
			EIP155Block:    big.NewInt(2),
			EIP158Block:    big.NewInt(2),
			ByzantiumBlock: big.NewInt(3),
		}
		dbNoFork  = rawdb.NewMemoryDatabase()
		dbProFork = rawdb.NewMemoryDatabase()

		gspecNoFork  = &core.Genesis{Config: configNoFork}
		gspecProFork = &core.Genesis{Config: configProFork}

		chainNoFork, _  = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil, nil)
		chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{}, nil, nil)

		_, blocksNoFork, _  = core.GenerateChainWithGenesis(gspecNoFork, engine, 2, nil)
		_, blocksProFork, _ = core.GenerateChainWithGenesis(gspecProFork, engine, 2, nil)

		ethNoFork, _ = newHandler(&handlerConfig{
			Database:   dbNoFork,
			Chain:      chainNoFork,
			TxPool:     newTestTxPool(),
			Merger:     consensus.NewMerger(rawdb.NewMemoryDatabase()),
			Network:    1,
			Sync:       downloader.FullSync,
			BloomCache: 1,
		})
		ethProFork, _ = newHandler(&handlerConfig{
			Database:   dbProFork,
			Chain:      chainProFork,
			TxPool:     newTestTxPool(),
			Merger:     consensus.NewMerger(rawdb.NewMemoryDatabase()),
			Network:    1,
			Sync:       downloader.FullSync,
			BloomCache: 1,
		})
	)
	ethNoFork.Start(1000)
	ethProFork.Start(1000)

	// Clean up everything after ourselves
	defer chainNoFork.Stop()
	defer chainProFork.Stop()

	defer ethNoFork.Stop()
	defer ethProFork.Stop()

	// Both nodes should allow the other to connect (same genesis, next fork is the same)
	p2pNoFork, p2pProFork := p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
	peerProFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc := make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatalf("frontier nofork <-> profork failed: %v", err)
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("frontier nofork <-> profork handler timeout")
		}
	}
	// Progress into Homestead. Forks match, so we don't care what the future holds
	chainNoFork.InsertChain(blocksNoFork[:1])
	chainProFork.InsertChain(blocksProFork[:1])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc = make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatalf("homestead nofork <-> profork failed: %v", err)
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("homestead nofork <-> profork handler timeout")
		}
	}
	// Progress into Spurious. Forks mismatch, signalling differing chains, reject
	chainNoFork.InsertChain(blocksNoFork[1:2])
	chainProFork.InsertChain(blocksProFork[1:2])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
	peerProFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc = make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	var successes int
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err == nil {
				successes++
				if successes == 2 { // Only one side disconnects
					t.Fatalf("fork ID rejection didn't happen")
				}
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("split peers not rejected")
		}
	}
}

// Tests that received transactions are added to the local pool.
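// The source peer runs the protocol handshake by hand and pushes a single
// signed transaction over the message pipe; the test then waits for the
// handler's transaction pool to emit a NewTxsEvent carrying that exact hash.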
func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) }
func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) }

func testRecvTransactions(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a message handler, configure it to accept transactions and watch them
	handler := newTestHandler()
	defer handler.close()

	handler.handler.acceptTxs = 1 // mark synced to accept transactions

	txs := make(chan core.NewTxsEvent)
	sub := handler.txpool.SubscribeNewTxsEvent(txs)
	defer sub.Unsubscribe()

	// Create a source peer to send messages through and a sink handler to receive them
	p2pSrc, p2pSink := p2p.MsgPipe()
	defer p2pSrc.Close()
	defer p2pSink.Close()

	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool)
	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool)
	defer src.Close()
	defer sink.Close()

	go handler.handler.runEthPeer(sink, func(peer *eth.Peer) error {
		return eth.Handle((*ethHandler)(handler.handler), peer)
	})
	// Run the handshake locally to avoid spinning up a source handler
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake")
	}
	// Send the transaction to the sink and verify that it's added to the tx pool
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
	tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

	if err := src.SendTransactions([]*types.Transaction{tx}); err != nil {
		t.Fatalf("failed to send transaction: %v", err)
	}
	select {
	case event := <-txs:
		if len(event.Txs) != 1 {
			t.Errorf("wrong number of added transactions: got %d, want 1", len(event.Txs))
		} else if event.Txs[0].Hash() != tx.Hash() {
			t.Errorf("added wrong tx hash: got %v, want %v", event.Txs[0].Hash(), tx.Hash())
		}
	case <-time.After(2 * time.Second):
		t.Errorf("no NewTxsEvent received within 2 seconds")
	}
}

// This test checks that pending transactions are sent.
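// A hundred large (10KiB payload) transactions are queued in the pool before
// the sink peer connects; on eth/66 and later the handler must announce the
// hashes only, so any full transaction broadcast during the initial exchange
// fails the test.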
func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }
func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) }
func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) }

func testSendTransactions(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a message handler and fill the pool with big transactions
	handler := newTestHandler()
	defer handler.close()

	insert := make([]*types.Transaction, 100)
	for nonce := range insert {
		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, 10240))
		tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

		insert[nonce] = tx
	}
	go handler.txpool.AddRemotes(insert) // Need goroutine to not block on feed
	time.Sleep(250 * time.Millisecond)   // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join)

	// Create a source handler to send messages through and a sink peer to receive them
	p2pSrc, p2pSink := p2p.MsgPipe()
	defer p2pSrc.Close()
	defer p2pSink.Close()

	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool)
	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool)
	defer src.Close()
	defer sink.Close()

	go handler.handler.runEthPeer(src, func(peer *eth.Peer) error {
		return eth.Handle((*ethHandler)(handler.handler), peer)
	})
	// Run the handshake locally to avoid spinning up a source handler
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake")
	}
	// After the handshake completes, the source handler should stream the sink
	// the transactions, subscribe to all inbound network events
	backend := new(testEthHandler)

	anns := make(chan []common.Hash)
	annSub := backend.txAnnounces.Subscribe(anns)
	defer annSub.Unsubscribe()

	bcasts := make(chan []*types.Transaction)
	bcastSub := backend.txBroadcasts.Subscribe(bcasts)
	defer bcastSub.Unsubscribe()

	go eth.Handle(backend, sink)

	// Make sure we get all the transactions on the correct channels
	seen := make(map[common.Hash]struct{})
	for len(seen) < len(insert) {
		switch protocol {
		case 66, 67, 68:
			select {
			case hashes := <-anns:
				for _, hash := range hashes {
					if _, ok := seen[hash]; ok {
						t.Errorf("duplicate transaction announced: %x", hash)
					}
					seen[hash] = struct{}{}
				}
			case <-bcasts:
				t.Errorf("initial tx broadcast received on post eth/66")
			}

		default:
			panic("unsupported protocol, please extend test")
		}
	}
	for _, tx := range insert {
		if _, ok := seen[tx.Hash()]; !ok {
			t.Errorf("missing transaction: %x", tx.Hash())
		}
	}
}

// Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals.
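// Ten sink handlers are attached so the source splits between direct
// broadcasts and hash announcements; every sink's transaction pool must still
// end up receiving all 1024 injected transactions.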
func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }
func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) }
func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) }

func testTransactionPropagation(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a source handler to send transactions from and a number of sinks
	// to receive them. We need multiple sinks since a one-to-one peering would
	// broadcast all transactions without announcement.
	source := newTestHandler()
	source.handler.snapSync = 0 // Avoid requiring snap, otherwise some will be dropped below
	defer source.close()

	sinks := make([]*testHandler, 10)
	for i := 0; i < len(sinks); i++ {
		sinks[i] = newTestHandler()
		defer sinks[i].close()

		sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions
	}
	// Interconnect all the sink handlers with the source handler
	for i, sink := range sinks {
		sink := sink // Closure for goroutine below

		sourcePipe, sinkPipe := p2p.MsgPipe()
		defer sourcePipe.Close()
		defer sinkPipe.Close()

		sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool)
		sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool)
		defer sourcePeer.Close()
		defer sinkPeer.Close()

		go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(source.handler), peer)
		})
		go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(sink.handler), peer)
		})
	}
	// Subscribe to all the transaction pools
	txChs := make([]chan core.NewTxsEvent, len(sinks))
	for i := 0; i < len(sinks); i++ {
		txChs[i] = make(chan core.NewTxsEvent, 1024)

		sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i])
		defer sub.Unsubscribe()
	}
	// Fill the source pool with transactions and wait for them at the sinks
	txs := make([]*types.Transaction, 1024)
	for nonce := range txs {
		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
		tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

		txs[nonce] = tx
	}
	source.txpool.AddRemotes(txs)

	// Iterate through all the sinks and ensure they all got the transactions
	for i := range sinks {
		for arrived, timeout := 0, false; arrived < len(txs) && !timeout; {
			select {
			case event := <-txChs[i]:
				arrived += len(event.Txs)
			case <-time.After(2 * time.Second):
				t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs))
				timeout = true
			}
		}
	}
}

// Tests that post eth protocol handshake, clients perform a mutual checkpoint
// challenge to validate each other's chains. Hash mismatches, or missing ones
// during a fast sync should lead to the peer getting dropped.
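// Each table entry below fixes the local sync mode, whether a checkpoint (CHT)
// is configured, how the remote answers the header challenge (timeout, empty,
// matching or mismatching header), and whether the peer should be dropped.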
func TestCheckpointChallenge(t *testing.T) {
	tests := []struct {
		syncmode   downloader.SyncMode
		checkpoint bool
		timeout    bool
		empty      bool
		match      bool
		drop       bool
	}{
		// If checkpointing is not enabled locally, don't challenge and don't drop
		{downloader.FullSync, false, false, false, false, false},
		{downloader.SnapSync, false, false, false, false, false},

		// If checkpointing is enabled locally and remote response is empty, only drop during fast sync
		{downloader.FullSync, true, false, true, false, false},
		{downloader.SnapSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer

		// If checkpointing is enabled locally and remote response mismatches, always drop
		{downloader.FullSync, true, false, false, false, true},
		{downloader.SnapSync, true, false, false, false, true},

		// If checkpointing is enabled locally and remote response matches, never drop
		{downloader.FullSync, true, false, false, true, false},
		{downloader.SnapSync, true, false, false, true, false},

		// If checkpointing is enabled locally and remote times out, always drop
		{downloader.FullSync, true, true, false, true, true},
		{downloader.SnapSync, true, true, false, true, true},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
			testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop)
		})
	}
}

func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) {
	// Reduce the checkpoint handshake challenge timeout
	defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout)
	syncChallengeTimeout = 250 * time.Millisecond

	// Create a test handler and inject a CHT into it. The injection is a bit
	// ugly, but it beats creating everything manually just to avoid reaching
	// into the internals a bit.
	handler := newTestHandler()
	defer handler.close()

	if syncmode == downloader.SnapSync {
		atomic.StoreUint32(&handler.handler.snapSync, 1)
	} else {
		atomic.StoreUint32(&handler.handler.snapSync, 0)
	}
	var response *types.Header
	if checkpoint {
		number := (uint64(rand.Intn(500))+1)*params.CHTFrequency - 1
		response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")}

		handler.handler.checkpointNumber = number
		handler.handler.checkpointHash = response.Hash()
	}

	// Create a challenger peer and a challenged one.
	p2pLocal, p2pRemote := p2p.MsgPipe()
	defer p2pLocal.Close()
	defer p2pRemote.Close()

	local := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pLocal), p2pLocal, handler.txpool)
	remote := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pRemote), p2pRemote, handler.txpool)
	defer local.Close()
	defer remote.Close()

	handlerDone := make(chan struct{})
	go func() {
		defer close(handlerDone)
		handler.handler.runEthPeer(local, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(handler.handler), peer)
		})
	}()

	// Run the handshake locally to avoid spinning up a remote handler.
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake")
	}
	// Connect a new peer and check that we receive the checkpoint challenge.
	if checkpoint {
		msg, err := p2pRemote.ReadMsg()
		if err != nil {
			t.Fatalf("failed to read checkpoint challenge: %v", err)
		}
		request := new(eth.GetBlockHeadersPacket66)
		if err := msg.Decode(request); err != nil {
			t.Fatalf("failed to decode checkpoint challenge: %v", err)
		}
		query := request.GetBlockHeadersPacket
		if query.Origin.Number != response.Number.Uint64() || query.Amount != 1 || query.Skip != 0 || query.Reverse {
			t.Fatalf("challenge mismatch: have [%d, %d, %d, %v] want [%d, %d, %d, %v]",
				query.Origin.Number, query.Amount, query.Skip, query.Reverse,
				response.Number.Uint64(), 1, 0, false)
		}
		// Create a block to reply to the challenge if no timeout is simulated.
		if !timeout {
			if empty {
				if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			} else if match {
				responseRlp, _ := rlp.EncodeToBytes(response)
				if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			} else {
				responseRlp, _ := rlp.EncodeToBytes(&types.Header{Number: response.Number})
				if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			}
		}
	}
	// Wait until the test timeout passes to ensure proper cleanup
	time.Sleep(syncChallengeTimeout + 300*time.Millisecond)

	// Verify that the remote peer is maintained or dropped.
	if drop {
		<-handlerDone
		if peers := handler.handler.peers.len(); peers != 0 {
			t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
		}
	} else {
		if peers := handler.handler.peers.len(); peers != 1 {
			t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
		}
	}
}

// Tests that blocks are broadcast to a sqrt number of peers only.
func TestBroadcastBlock1Peer(t *testing.T)    { testBroadcastBlock(t, 1, 1) }
func TestBroadcastBlock2Peers(t *testing.T)   { testBroadcastBlock(t, 2, 1) }
func TestBroadcastBlock3Peers(t *testing.T)   { testBroadcastBlock(t, 3, 1) }
func TestBroadcastBlock4Peers(t *testing.T)   { testBroadcastBlock(t, 4, 2) }
func TestBroadcastBlock5Peers(t *testing.T)   { testBroadcastBlock(t, 5, 2) }
func TestBroadcastBlock8Peers(t *testing.T)   { testBroadcastBlock(t, 9, 3) }
func TestBroadcastBlock12Peers(t *testing.T)  { testBroadcastBlock(t, 12, 3) }
func TestBroadcastBlock16Peers(t *testing.T)  { testBroadcastBlock(t, 16, 4) }
func TestBroadcastBlock26Peers(t *testing.T)  { testBroadcastBlock(t, 26, 5) }
func TestBroadcastBlock100Peers(t *testing.T) { testBroadcastBlock(t, 100, 10) }

func testBroadcastBlock(t *testing.T, peers, bcasts int) {
	t.Parallel()

	// Create a source handler to broadcast blocks from and a number of sinks
	// to receive them.
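	// The expected broadcast count passed in by each wrapper above is roughly
	// the square root of the peer count, mirroring the handler's propagation policy.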
	source := newTestHandlerWithBlocks(1)
	defer source.close()

	sinks := make([]*testEthHandler, peers)
	for i := 0; i < len(sinks); i++ {
		sinks[i] = new(testEthHandler)
	}
	// Interconnect all the sink handlers with the source handler
	var (
		genesis = source.chain.Genesis()
		td      = source.chain.GetTd(genesis.Hash(), genesis.NumberU64())
	)
	for i, sink := range sinks {
		sink := sink // Closure for goroutine below

		sourcePipe, sinkPipe := p2p.MsgPipe()
		defer sourcePipe.Close()
		defer sinkPipe.Close()

		sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
		sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
		defer sourcePeer.Close()
		defer sinkPeer.Close()

		go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(source.handler), peer)
		})
		if err := sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil {
			t.Fatalf("failed to run protocol handshake")
		}
		go eth.Handle(sink, sinkPeer)
	}
	// Subscribe to all the block broadcast feeds
	blockChs := make([]chan *types.Block, len(sinks))
	for i := 0; i < len(sinks); i++ {
		blockChs[i] = make(chan *types.Block, 1)
		defer close(blockChs[i])

		sub := sinks[i].blockBroadcasts.Subscribe(blockChs[i])
		defer sub.Unsubscribe()
	}
	// Initiate a block propagation across the peers
	time.Sleep(100 * time.Millisecond)
	source.handler.BroadcastBlock(source.chain.CurrentBlock(), true)

	// Iterate through all the sinks and ensure the correct number got the block
	done := make(chan struct{}, peers)
	for _, ch := range blockChs {
		ch := ch
		go func() {
			<-ch
			done <- struct{}{}
		}()
	}
	var received int
	for {
		select {
		case <-done:
			received++

		case <-time.After(100 * time.Millisecond):
			if received != bcasts {
				t.Errorf("broadcast count mismatch: have %d, want %d", received, bcasts)
			}
			return
		}
	}
}

// Tests that a propagated malformed block (uncles or transactions don't match
// with the hashes in the header) gets discarded and not broadcast forward.
func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }
func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) }
func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) }

func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a source handler to broadcast blocks from and a number of sinks
	// to receive them.
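	// A single sink peer is enough here: the test only needs to observe that
	// none of the corrupted headers assembled below are ever forwarded to it.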
	source := newTestHandlerWithBlocks(1)
	defer source.close()

	// Create a source handler to send messages through and a sink peer to receive them
	p2pSrc, p2pSink := p2p.MsgPipe()
	defer p2pSrc.Close()
	defer p2pSink.Close()

	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, source.txpool)
	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, source.txpool)
	defer src.Close()
	defer sink.Close()

	go source.handler.runEthPeer(src, func(peer *eth.Peer) error {
		return eth.Handle((*ethHandler)(source.handler), peer)
	})
	// Run the handshake locally to avoid spinning up a sink handler
	var (
		genesis = source.chain.Genesis()
		td      = source.chain.GetTd(genesis.Hash(), genesis.NumberU64())
	)
	if err := sink.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake")
	}
	// After the handshake completes, the source handler should stream the sink
	// the blocks, subscribe to inbound network events
	backend := new(testEthHandler)

	blocks := make(chan *types.Block, 1)
	sub := backend.blockBroadcasts.Subscribe(blocks)
	defer sub.Unsubscribe()

	go eth.Handle(backend, sink)

	// Create various combinations of malformed blocks
	head := source.chain.CurrentBlock()

	malformedUncles := head.Header()
	malformedUncles.UncleHash[0]++
	malformedTransactions := head.Header()
	malformedTransactions.TxHash[0]++
	malformedEverything := head.Header()
	malformedEverything.UncleHash[0]++
	malformedEverything.TxHash[0]++

	// Try to broadcast all malformations and ensure they all get discarded
	for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} {
		block := types.NewBlockWithHeader(header).WithBody(head.Transactions(), head.Uncles())
		if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil {
			t.Fatalf("failed to broadcast block: %v", err)
		}
		select {
		case <-blocks:
			t.Fatalf("malformed block forwarded")
		case <-time.After(100 * time.Millisecond):
		}
	}
}